// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <smccc.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_fs_rpc.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(1536 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)

#define GET_STACK(stack) \
	((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, static);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

const void *stack_tmp_export = (uint8_t *)stack_tmp + sizeof(stack_tmp[0]) -
			       (STACK_TMP_OFFS + STACK_CANARY_SIZE / 2);
const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence it is kept in pager
 * sections.
 */
KEEP_PAGER(stack_tmp_export);
KEEP_PAGER(stack_tmp_stride);

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_nintr_handler_t thread_nintr_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va;
long thread_user_kcode_offset;
static size_t thread_user_kcode_size;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(thread_core_local), SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE) __section(".nozi.kdata_page");
#endif

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
static bool thread_prealloc_rpc_cache;

static unsigned int thread_rpc_pnum;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
		     #name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n)					\
	do {								\
		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
		panic();						\
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n);
		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n);
		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n);
	}
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n);
		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

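/*
 * Exception mask helpers. The mask bits live in CPSR on ARM32 and in
 * DAIF on ARM64; only the bits covered by THREAD_EXCP_ALL are modified.
 */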
#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
#if defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * ARM TF saves and restores CPACR_EL1, so we must assume NS world
	 * uses VFP and always preserve the register file when secure world
	 * is about to use it
	 */
	thr->vfp_state.ns_force_save = true;
#endif
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

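/*
 * Set up the initial register context of a newly allocated thread: it
 * starts executing thread_std_smc_entry() with the SMC arguments in
 * r0-r7 (ARM32) or x0-x7 (ARM64).
 */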
#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous abort and unmasked native interrupts.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			(THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous abort and unmasked native interrupts.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();
	size_t n;

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].mutexes);
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		thread_core_local[n].curr_thread = -1;

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = -1;
}

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

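/*
 * Copy the first six SMC arguments into the saved register context so
 * that they appear in r0-r5 (ARM32) or x0-x5 (ARM64) when a suspended
 * thread is resumed after an RPC.
 */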
#ifdef ARM32
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update the values returned from the RPC; they will appear in
	 * r0-r5 when the thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
	regs->r4 = args->a4;
	regs->r5 = args->a5;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update the values returned from the RPC; they will appear in
	 * x0-x5 when the thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
	regs->x[4] = args->a4;
	regs->x[5] = args->a5;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (n < CFG_NUM_THREADS &&
	    threads[n].state == THREAD_STATE_SUSPENDED &&
	    args->a7 == threads[n].hyp_clnt_id)
		threads[n].state = THREAD_STATE_ACTIVE;
	else
		rv = OPTEE_SMC_RETURN_ERESUME;

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * Return from RPC to request service of a foreign interrupt must not
	 * get parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a5(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc_arg()
 *
 * @cookie:	cookie received when allocating the buffer
 */
static void thread_rpc_free_arg(uint64_t cookie)
{
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak __thread_std_smc_entry(struct thread_smc_args *args)
{
	thread_std_smc_handler_ptr(args);

	if (args->a0 == OPTEE_SMC_RETURN_OK) {
		struct thread_ctx *thr = threads + thread_get_id();

		tee_fs_rpc_cache_clear(&thr->tsd);
		if (!thread_prealloc_rpc_cache) {
			thread_rpc_free_arg(mobj_get_cookie(thr->rpc_mobj));
			mobj_free(thr->rpc_mobj);
			thr->rpc_arg = NULL;
			thr->rpc_mobj = NULL;
		}
	}
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == -1)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

#ifdef ARM64
bool thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/* If any bit in l->flags is set we're handling some exception. */
	ret = !l->flags;
	thread_unmask_exceptions(exceptions);

	return ret;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	assert(TAILQ_EMPTY(&threads[ct].mutexes));

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

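/*
 * With the pager enabled, the physical pages backing the unused part of
 * a thread's kernel stack are handed back to the pager when the thread
 * is suspended.
 */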
#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

int thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

int thread_get_id(void)
{
	int ct = thread_get_id_may_fail();

	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_nintr_handler_ptr = handlers->nintr;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Add the area to the pager */
		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
					TEE_MATTR_PRW | TEE_MATTR_LOCKED,
					NULL, NULL);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/

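/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0, only a small part of the core (the
 * exception vector, thread_excp_vect..thread_excp_vect_end) remains
 * mapped while executing in user mode. Record its location, size and
 * offset within the user VA range.
 */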
static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	/*
	 * When transitioning to EL0, subtract this much from SP to make it
	 * point into this special kdata page instead. SP is restored by
	 * adding the same amount back when transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(const struct thread_handlers *handlers)
{
	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
	pgt_init();

	init_user_kcode();
}

static void init_sec_mon(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_tmp[pos]));
#endif
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

#ifdef ARM64
static bool probe_workaround_available(void)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001)	/* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0);
	return r >= 0;
}

static vaddr_t select_vector(vaddr_t a)
{
	if (probe_workaround_available()) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return a;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t select_vector(vaddr_t a)
{
	return a;
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;

	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	case CORTEX_A57_PART_NUM:
	case CORTEX_A72_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround);
#ifdef ARM32
	case CORTEX_A15_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround_a15);
#endif
	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar(get_excp_vect());
}

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

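/*
 * Record in the thread flags whether the current thread wants foreign
 * interrupts enabled and update the exception mask to match; the flag
 * lets thread_restore_foreign_intr() re-enable them later.
 */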
void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |=
					THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
					~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

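/*
 * VFP state is handled lazily: the non-secure, secure kernel and user TA
 * register files are only written back to memory when another context is
 * about to use the VFP.
 */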
#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  thr->vfp_state.ns_force_save);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec,
					  false /*!force_save*/);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  thr->vfp_state.ns_force_save);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp,
						  false /*!force_save*/);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

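/*
 * Build the SPSR value used when entering user mode. On a 64-bit core
 * AArch32 or AArch64 state is selected based on the TA; for a 32-bit TA
 * the Thumb bit is derived from the entry address.
 */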
#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_spsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr;

	tee_ta_update_session_utime_resume();

	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}
	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
					spsr, exit_status0, exit_status1);
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram;
	*offset = thread_user_kcode_va - TEE_RAM_START;
	*sz = thread_user_kcode_size;
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram;
	*offset = (vaddr_t)thread_user_kdata_page - TEE_RAM_START;
	*sz = sizeof(thread_user_kdata_page);
}
#endif

void thread_add_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == MUTEX_OWNER_ID_NONE);
	m->owner_id = ct;
	TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
}

void thread_rem_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == ct);
	m->owner_id = MUTEX_OWNER_ID_NONE;
	TAILQ_REMOVE(&threads[ct].mutexes, m, link);
}

bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].rpc_arg) {
			*cookie = mobj_get_cookie(threads[n].rpc_mobj);
			mobj_free(threads[n].rpc_mobj);
			threads[n].rpc_arg = NULL;
			goto out;
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

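/*
 * RPC argument buffer handling: each thread keeps a struct optee_msg_arg
 * shared with the normal world (thr->rpc_arg/thr->rpc_mobj). It is
 * allocated on first use and, when the preallocated RPC cache is enabled,
 * kept for later calls; otherwise it is freed when the std SMC returns.
 */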
/**
 * Allocates data for struct optee_msg_arg.
 *
 * @size:	size in bytes of struct optee_msg_arg
 *
 * @returns	mobj that describes allocated buffer or NULL on error
 */
static struct mobj *thread_rpc_alloc_arg(size_t size)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};
	struct mobj *mobj = NULL;

	thread_rpc(rpc_args);

	pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
	co = reg_pair_to_64(rpc_args[4], rpc_args[5]);

	if (!ALIGNMENT_IS_OK(pa, struct optee_msg_arg))
		goto err;

	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		mobj = mobj_shm_alloc(pa, size, co);
	else if ((!(pa & SMALL_PAGE_MASK)) && size <= SMALL_PAGE_SIZE)
		mobj = mobj_mapped_shm_alloc(&pa, 1, 0, co);

	if (!mobj)
		goto err;

	return mobj;
err:
	thread_rpc_free_arg(co);
	mobj_free(mobj);
	return NULL;
}

static bool get_rpc_arg(uint32_t cmd, size_t num_params,
			struct optee_msg_arg **arg_ret, uint64_t *carg_ret)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	struct mobj *mobj;
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return false;

	if (!arg) {
		mobj = thread_rpc_alloc_arg(sz);
		if (!mobj)
			return false;

		arg = mobj_get_va(mobj, 0);
		if (!arg) {
			thread_rpc_free_arg(mobj_get_cookie(mobj));
			return false;
		}

		thr->rpc_arg = arg;
		thr->rpc_mobj = mobj;
	}

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	*arg_ret = arg;
	*carg_ret = mobj_get_cookie(thr->rpc_mobj);
	return true;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct optee_msg_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	size_t n;

	/* The source CRYPTO_RNG_SRC_JITTER_RPC is safe to use here */
	plat_prng_add_jitter_entropy(CRYPTO_RNG_SRC_JITTER_RPC,
				     &thread_rpc_pnum);

	if (!get_rpc_arg(cmd, num_params, &arg, &carg))
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(arg->params, params, sizeof(*params) * num_params);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
	for (n = 0; n < num_params; n++) {
		switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			params[n] = arg->params[n];
			break;
		default:
			break;
		}
	}
	return arg->ret;
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @cookie:	cookie received when allocating the buffer
 * @bt:		must be the same as supplied when allocating
 * @mobj:	mobj that describes allocated buffer
 *
 * This function also frees the corresponding mobj.
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &arg, &carg))
		return;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = cookie;
	arg->params[0].u.value.c = 0;

	mobj_free(mobj);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
}

/**
 * Allocates shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_MSG_RPC_SHM_TYPE_*
 *
 * @returns	mobj that describes the allocated buffer or NULL on failure
 */
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	struct mobj *mobj = NULL;
	uint64_t cookie;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &arg, &carg))
		return NULL;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = size;
	arg->params[0].u.value.c = align;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	if (arg->ret != TEE_SUCCESS)
		return NULL;

	if (arg->num_params != 1)
		return NULL;

	if (arg->params[0].attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT) {
		cookie = arg->params[0].u.tmem.shm_ref;
		mobj = mobj_shm_alloc(arg->params[0].u.tmem.buf_ptr,
				      arg->params[0].u.tmem.size,
				      cookie);
	} else if (arg->params[0].attr == (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
					   OPTEE_MSG_ATTR_NONCONTIG)) {
		cookie = arg->params[0].u.tmem.shm_ref;
		mobj = msg_param_mobj_from_noncontig(
			arg->params[0].u.tmem.buf_ptr,
			arg->params[0].u.tmem.size,
			cookie,
			true);
	} else {
		return NULL;
	}

	if (!mobj) {
		thread_rpc_free(bt, cookie, mobj);
		return NULL;
	}

	assert(mobj_is_nonsec(mobj));

	return mobj;
}

struct mobj *thread_rpc_alloc_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL);
}

void thread_rpc_free_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, mobj_get_cookie(mobj),
			mobj);
}

struct mobj *thread_rpc_alloc_global_payload(size_t size)
{
	return thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_GLOBAL);
}

void thread_rpc_free_global_payload(struct mobj *mobj)
{
	thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_GLOBAL, mobj_get_cookie(mobj),
			mobj);
}