// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <smccc.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_fs_rpc.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif


#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(1536 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif
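
/*
 * Rough layout of a stack declared with DECLARE_STACK() below when
 * canaries are enabled (sketch derived from the macros above):
 *
 *	[ start canary | usable stack ...               | end canary ]
 *	^ name[n][0]                                      ^ last word
 *
 * GET_STACK() returns the initial stack pointer, placed
 * STACK_CANARY_SIZE / 2 bytes below the end of the buffer so that normal
 * stack usage never touches the end canary; the start canary in the very
 * first word catches overflow past the nominal stack size.
 */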

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack"), \
			       aligned(STACK_ALIGNMENT)))

#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)

#define GET_STACK(stack) \
	((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, static);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

const void *stack_tmp_export = (uint8_t *)stack_tmp + sizeof(stack_tmp[0]) -
			       (STACK_TMP_OFFS + STACK_CANARY_SIZE / 2);
const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence it is kept in the pager
 * sections.
 */
KEEP_PAGER(stack_tmp_export);
KEEP_PAGER(stack_tmp_stride);

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_nintr_handler_t thread_nintr_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va;
long thread_user_kcode_offset;
static size_t thread_user_kcode_size;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(thread_core_local), SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE) __section(".nozi.kdata_page");
#endif

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
static bool thread_prealloc_rpc_cache;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name) \
	for (n = 0; n < ARRAY_SIZE(name); n++) { \
		uint32_t *start_canary = &GET_START_CANARY(name, n); \
		uint32_t *end_canary = &GET_END_CANARY(name, n); \
		\
		*start_canary = START_CANARY_VALUE; \
		*end_canary = END_CANARY_VALUE; \
		DMSG("#Stack canaries for %s[%zu] with top at %p\n", \
		     #name, n, (void *)(end_canary - 1)); \
		DMSG("watch *%p\n", (void *)end_canary); \
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n) \
	do { \
		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
		panic(); \
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n);
		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n);
		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n);
	}
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n);
		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}


struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

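/*
 * Typical calling pattern (sketch, mirroring e.g. thread_get_id_may_fail()
 * below): callers mask foreign interrupts around the access so the thread
 * cannot migrate to another core while the pointer is in use:
 *
 *	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	struct thread_core_local *l = thread_get_core_local();
 *
 *	... use l ...
 *	thread_unmask_exceptions(exceptions);
 */
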
static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * ARM TF saves and restores CPACR_EL1, so we must assume NS world
	 * uses VFP and always preserve the register file when secure world
	 * is about to use it
	 */
	thr->vfp_state.ns.force_save = true;
#endif
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			(THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		      struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in EL1 with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].mutexes);
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		thread_core_local[n].curr_thread = -1;

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = -1;
}

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

#ifdef ARM32
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r5 when the thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
	regs->r4 = args->a4;
	regs->r5 = args->a5;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
			  struct thread_smc_args *args)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x5 when the thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
	regs->x[4] = args->a4;
	regs->x[5] = args->a5;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (n < CFG_NUM_THREADS &&
	    threads[n].state == THREAD_STATE_SUSPENDED &&
	    args->a7 == threads[n].hyp_clnt_id)
		threads[n].state = THREAD_STATE_ACTIVE;
	else
		rv = OPTEE_SMC_RETURN_ERESUME;

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * A return from an RPC that requested service of a foreign
	 * interrupt must not fetch parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a5(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

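/*
 * Rough call flow for a standard call allocating a new thread (sketch; the
 * thread entry itself is in architecture specific assembly):
 *
 *	SMC from normal world
 *	  -> thread_handle_std_smc()
 *	       -> thread_alloc_and_run()
 *	            -> init_regs() sets pc to thread_std_smc_entry
 *	            -> thread_resume(&threads[n].regs)
 *	                 -> thread_std_smc_entry() (assembly)
 *	                      -> __thread_std_smc_entry() below
 *	                           -> thread_std_smc_handler_ptr()
 *
 * A return from RPC instead goes through thread_resume_from_rpc(), which
 * resumes the suspended thread where it left off.
 */
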
/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak __thread_std_smc_entry(struct thread_smc_args *args)
{
	thread_std_smc_handler_ptr(args);

	if (args->a0 == OPTEE_SMC_RETURN_OK) {
		struct thread_ctx *thr = threads + thread_get_id();

		tee_fs_rpc_cache_clear(&thr->tsd);
		if (!thread_prealloc_rpc_cache) {
			thread_rpc_free_arg(thr->rpc_carg);
			mobj_free(thr->rpc_mobj);
			thr->rpc_carg = 0;
			thr->rpc_arg = 0;
			thr->rpc_mobj = NULL;
		}
	}
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == -1)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

#ifdef ARM64
bool thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/* If any bit in l->flags is set we're handling some exception. */
	ret = !l->flags;
	thread_unmask_exceptions(exceptions);

	return ret;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	assert(TAILQ_EMPTY(&threads[ct].mutexes));

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

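/*
 * Thread state life cycle as driven by this file (sketch):
 *
 *	FREE      --thread_alloc_and_run()---> ACTIVE
 *	ACTIVE    --thread_state_suspend()---> SUSPENDED  (RPC or foreign
 *	                                                   interrupt)
 *	SUSPENDED --thread_resume_from_rpc()-> ACTIVE
 *	ACTIVE    --thread_state_free()------> FREE
 */
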
#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ?  thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

int thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

int thread_get_id(void)
{
	int ct = thread_get_id_may_fail();

	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_nintr_handler_ptr = handlers->nintr;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim any physical page already backing this area */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Add the area to the pager */
		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
					TEE_MATTR_PRW | TEE_MATTR_LOCKED,
					NULL, NULL);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/

static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v;

	v = (vaddr_t)thread_excp_vect;
	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	/*
	 * The maximum size of the exception vector and associated code is
	 * something slightly larger than 2 KiB. Worst case the exception
	 * vector can span two pages.
	 */
	thread_user_kcode_size = CORE_MMU_USER_CODE_SIZE * 2;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	/*
	 * When transitioning to EL0, subtract this much from SP to make it
	 * point into this special kdata page instead. SP is restored by
	 * adding the same amount when transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(const struct thread_handlers *handlers)
{
	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
	pgt_init();

	init_user_kcode();
}

static void init_sec_mon(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_tmp[pos]));
#endif
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

#ifdef ARM64
static bool probe_workaround_available(void)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001)	/* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0);
	return r >= 0;
}

static vaddr_t select_vector(vaddr_t a)
{
	if (probe_workaround_available()) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return a;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t select_vector(vaddr_t a)
{
	return a;
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;

	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	case CORTEX_A57_PART_NUM:
	case CORTEX_A72_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround);
#ifdef ARM32
	case CORTEX_A15_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround_a15);
#endif
	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar(get_excp_vect());
}

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
1057 { 1058 /* thread_get_core_local() requires foreign interrupts to be disabled */ 1059 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 1060 struct thread_core_local *l; 1061 1062 l = thread_get_core_local(); 1063 1064 assert(l->curr_thread != -1); 1065 1066 if (enable) { 1067 threads[l->curr_thread].flags |= 1068 THREAD_FLAGS_FOREIGN_INTR_ENABLE; 1069 thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 1070 } else { 1071 /* 1072 * No need to disable foreign interrupts here since they're 1073 * already disabled above. 1074 */ 1075 threads[l->curr_thread].flags &= 1076 ~THREAD_FLAGS_FOREIGN_INTR_ENABLE; 1077 } 1078 } 1079 1080 void thread_restore_foreign_intr(void) 1081 { 1082 /* thread_get_core_local() requires foreign interrupts to be disabled */ 1083 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 1084 struct thread_core_local *l; 1085 1086 l = thread_get_core_local(); 1087 1088 assert(l->curr_thread != -1); 1089 1090 if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE) 1091 thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 1092 } 1093 1094 #ifdef CFG_WITH_VFP 1095 uint32_t thread_kernel_enable_vfp(void) 1096 { 1097 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 1098 struct thread_ctx *thr = threads + thread_get_id(); 1099 struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp; 1100 1101 assert(!vfp_is_enabled()); 1102 1103 if (!thr->vfp_state.ns_saved) { 1104 vfp_lazy_save_state_final(&thr->vfp_state.ns); 1105 thr->vfp_state.ns_saved = true; 1106 } else if (thr->vfp_state.sec_lazy_saved && 1107 !thr->vfp_state.sec_saved) { 1108 /* 1109 * This happens when we're handling an abort while the 1110 * thread was using the VFP state. 1111 */ 1112 vfp_lazy_save_state_final(&thr->vfp_state.sec); 1113 thr->vfp_state.sec_saved = true; 1114 } else if (tuv && tuv->lazy_saved && !tuv->saved) { 1115 /* 1116 * This can happen either during syscall or abort 1117 * processing (while processing a syscall). 
void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_spsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr;

	tee_ta_update_session_utime_resume();

	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}
	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
					spsr, exit_status0, exit_status1);
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram;
	*offset = thread_user_kcode_va - CFG_TEE_RAM_START;
	*sz = thread_user_kcode_size;
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram;
	*offset = (vaddr_t)thread_user_kdata_page - CFG_TEE_RAM_START;
	*sz = sizeof(thread_user_kdata_page);
}
#endif

void thread_add_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == MUTEX_OWNER_ID_NONE);
	m->owner_id = ct;
	TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
}

void thread_rem_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == ct);
	m->owner_id = MUTEX_OWNER_ID_NONE;
	TAILQ_REMOVE(&threads[ct].mutexes, m, link);
}

bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].rpc_arg) {
			mobj_free(threads[n].rpc_mobj);
			*cookie = threads[n].rpc_carg;
			threads[n].rpc_carg = 0;
			threads[n].rpc_arg = NULL;
			goto out;
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

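/*
 * Note on intended use (sketch; the cache is normally toggled from the
 * normal world via fast SMCs): thread_enable_prealloc_rpc_cache() keeps
 * each thread's RPC argument buffer allocated across calls, while
 * disabling is done by calling thread_disable_prealloc_rpc_cache()
 * repeatedly until it reports a zero cookie:
 *
 *	uint64_t cookie;
 *
 *	while (thread_disable_prealloc_rpc_cache(&cookie) && cookie)
 *		... tell normal world to free the buffer for cookie ...
 */
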
void thread_rpc_free_arg(uint64_t cookie)
{
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

struct mobj *thread_rpc_alloc_arg(size_t size, uint64_t *cookie)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};
	struct mobj *mobj = NULL;

	thread_rpc(rpc_args);

	pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
	co = reg_pair_to_64(rpc_args[4], rpc_args[5]);

	if (!ALIGNMENT_IS_OK(pa, struct optee_msg_arg))
		goto err;

	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		mobj = mobj_shm_alloc(pa, size);
	else if ((!(pa & SMALL_PAGE_MASK)) && size <= SMALL_PAGE_SIZE)
		mobj = mobj_mapped_shm_alloc(&pa, 1, 0, co);

	if (!mobj)
		goto err;

	*cookie = co;
	return mobj;
err:
	thread_rpc_free_arg(co);
	mobj_free(mobj);
	*cookie = 0;
	return NULL;
}

static bool get_rpc_arg(uint32_t cmd, size_t num_params,
			struct optee_msg_arg **arg_ret, uint64_t *carg_ret)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	struct mobj *mobj;
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	uint64_t c;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return false;

	if (!arg) {
		mobj = thread_rpc_alloc_arg(sz, &c);
		if (!mobj)
			return false;

		arg = mobj_get_va(mobj, 0);
		if (!arg)
			goto bad;

		thr->rpc_arg = arg;
		thr->rpc_carg = c;
		thr->rpc_mobj = mobj;
	}

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	*arg_ret = arg;
	*carg_ret = thr->rpc_carg;
	return true;

bad:
	thread_rpc_free_arg(c);
	return false;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct optee_msg_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	size_t n;

	/*
	 * Break recursion in case plat_prng_add_jitter_entropy_norpc()
	 * sleeps on a mutex or unlocks a mutex with a sleeper (contended
	 * mutex).
	 */
	if (cmd != OPTEE_MSG_RPC_CMD_WAIT_QUEUE)
		plat_prng_add_jitter_entropy_norpc();

	if (!get_rpc_arg(cmd, num_params, &arg, &carg))
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(arg->params, params, sizeof(*params) * num_params);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
	for (n = 0; n < num_params; n++) {
		switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			params[n] = arg->params[n];
			break;
		default:
			break;
		}
	}
	return arg->ret;
}

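/*
 * Example use of thread_rpc_cmd() (sketch, assuming OPTEE_MSG_RPC_CMD_GET_TIME
 * and the value layout used by that command):
 *
 *	struct optee_msg_param params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT;
 *	if (thread_rpc_cmd(OPTEE_MSG_RPC_CMD_GET_TIME, 1, &params) ==
 *	    TEE_SUCCESS)
 *		... params.u.value.a/.b hold the returned time ...
 */
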
/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @bt:		must be the same as supplied when allocating
 * @cookie:	cookie received when allocating the buffer
 * @mobj:	mobj that describes the allocated buffer
 *
 * This function also frees the corresponding mobj.
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &arg, &carg))
		return;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = cookie;
	arg->params[0].u.value.c = 0;

	mobj_free(mobj);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
}

/**
 * Allocates a shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_MSG_RPC_SHM_TYPE_*
 * @cookie:	returned cookie used when freeing the buffer
 *
 * Returns a mobj that describes the allocated buffer or NULL if the
 * allocation failed.
 */
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt,
				     uint64_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	struct mobj *mobj = NULL;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &arg, &carg))
		goto fail;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = size;
	arg->params[0].u.value.c = align;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	if (arg->ret != TEE_SUCCESS)
		goto fail;

	if (arg->num_params != 1)
		goto fail;

	if (arg->params[0].attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT) {
		*cookie = arg->params[0].u.tmem.shm_ref;
		mobj = mobj_shm_alloc(arg->params[0].u.tmem.buf_ptr,
				      arg->params[0].u.tmem.size);
	} else if (arg->params[0].attr == (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
					   OPTEE_MSG_ATTR_NONCONTIG)) {
		*cookie = arg->params[0].u.tmem.shm_ref;
		mobj = msg_param_mobj_from_noncontig(
			arg->params[0].u.tmem.buf_ptr,
			arg->params[0].u.tmem.size,
			*cookie,
			true);
	} else
		goto fail;

	if (!mobj)
		goto free_first;

	assert(mobj_is_nonsec(mobj));
	return mobj;

free_first:
	thread_rpc_free(bt, *cookie, mobj);
fail:
	*cookie = 0;
	return NULL;
}

struct mobj *thread_rpc_alloc_payload(size_t size, uint64_t *cookie)
{
	return thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie);
}

void thread_rpc_free_payload(uint64_t cookie, struct mobj *mobj)
{
	thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie, mobj);
}
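
/*
 * Typical pairing of the two helpers above (sketch):
 *
 *	uint64_t cookie = 0;
 *	struct mobj *mobj = thread_rpc_alloc_payload(size, &cookie);
 *
 *	if (mobj) {
 *		void *va = mobj_get_va(mobj, 0);
 *
 *		... exchange data with normal world through va ...
 *		thread_rpc_free_payload(cookie, mobj);
 *	}
 */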