// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/misc.h>
#include <kernel/msg_param.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <optee_msg.h>
#include <smccc.h>
#include <sm/optee_smc.h>
#include <sm/sm.h>
#include <tee/tee_cryp_utl.h>
#include <tee/tee_fs_rpc.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(1536 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE];

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
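
/*
 * Layout note (descriptive only): DECLARE_STACK() below pads each stack
 * array with STACK_CANARY_SIZE extra bytes, but only the first and the
 * last 32-bit words are written with canary values. STACK_SIZE() and
 * GET_STACK() then place the initial stack pointer STACK_CANARY_SIZE / 2
 * bytes below the top of the array, so the end canary is never part of
 * the usable stack, while a stack that overflows (grows down past its
 * reserved area) clobbers the start canary at index 0.
 */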
#else
#define STACK_CANARY_SIZE	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack"), \
			       aligned(STACK_ALIGNMENT)))

#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)

#define GET_STACK(stack) \
	((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, static);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

const void *stack_tmp_export = (uint8_t *)stack_tmp + sizeof(stack_tmp[0]) -
			       (STACK_TMP_OFFS + STACK_CANARY_SIZE / 2);
const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence it is kept in pager
 * sections.
 */
KEEP_PAGER(stack_tmp_export);
KEEP_PAGER(stack_tmp_stride);

thread_smc_handler_t thread_std_smc_handler_ptr;
static thread_smc_handler_t thread_fast_smc_handler_ptr;
thread_nintr_handler_t thread_nintr_handler_ptr;
thread_pm_handler_t thread_cpu_on_handler_ptr;
thread_pm_handler_t thread_cpu_off_handler_ptr;
thread_pm_handler_t thread_cpu_suspend_handler_ptr;
thread_pm_handler_t thread_cpu_resume_handler_ptr;
thread_pm_handler_t thread_system_off_handler_ptr;
thread_pm_handler_t thread_system_reset_handler_ptr;

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va;
long thread_user_kcode_offset;
static size_t thread_user_kcode_size;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(thread_core_local), SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE) __section(".nozi.kdata_page");
#endif

static unsigned int thread_global_lock = SPINLOCK_UNLOCK;
static bool thread_prealloc_rpc_cache;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p\n",	\
			#name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p\n", (void *)end_canary);		\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#ifndef CFG_WITH_PAGER
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n)					\
	do {								\
		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
		panic();						\
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n);
		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n);
		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n);
	}
#ifndef CFG_WITH_PAGER
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n);
		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

static void unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
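	 * Callers must therefore run with THREAD_EXCP_FOREIGN_INTR masked,
	 * which is what the assert just below checks.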
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
#if defined(ARM64) && defined(CFG_WITH_ARM_TRUSTED_FW)
	/*
	 * ARM TF saves and restores CPACR_EL1, so we must assume NS world
	 * uses VFP and always preserve the register file when secure world
	 * is about to use it.
	 */
	thr->vfp_state.ns.force_save = true;
#endif
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous abort and unmasked native interrupts.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			(THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when the thread is started.
	 */
	thread->regs.r0 = args->a0;
	thread->regs.r1 = args->a1;
	thread->regs.r2 = args->a2;
	thread->regs.r3 = args->a3;
	thread->regs.r4 = args->a4;
	thread->regs.r5 = args->a5;
	thread->regs.r6 = args->a6;
	thread->regs.r7 = args->a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread,
		struct thread_smc_args *args)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in EL1 (using SP_EL0) with masked foreign
	 * interrupts, masked asynchronous abort and unmasked native
	 * interrupts.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when the thread is started.
	 */
	thread->regs.x[0] = args->a0;
	thread->regs.x[1] = args->a1;
	thread->regs.x[2] = args->a2;
	thread->regs.x[3] = args->a3;
	thread->regs.x[4] = args->a4;
	thread->regs.x[5] = args->a5;
	thread->regs.x[6] = args->a6;
	thread->regs.x[7] = args->a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();
	size_t n;

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].mutexes);
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		thread_core_local[n].curr_thread = -1;

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	assert(TAILQ_EMPTY(&threads[l->curr_thread].mutexes));
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = -1;
}

static void thread_alloc_and_run(struct thread_smc_args *args)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	unlock_global();

	if (!found_thread) {
		args->a0 = OPTEE_SMC_RETURN_ETHREAD_LIMIT;
		return;
	}

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, args);

	/* Save Hypervisor Client ID */
	threads[n].hyp_clnt_id = args->a7;

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

#ifdef ARM32
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update the values returned from RPC, they will appear in
	 * r0-r5 when the thread is resumed.
	 */
	regs->r0 = args->a0;
	regs->r1 = args->a1;
	regs->r2 = args->a2;
	regs->r3 = args->a3;
	regs->r4 = args->a4;
	regs->r5 = args->a5;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a5(struct thread_ctx_regs *regs,
		struct thread_smc_args *args)
{
	/*
	 * Update the values returned from RPC, they will appear in
	 * x0-x5 when the thread is resumed.
	 */
	regs->x[0] = args->a0;
	regs->x[1] = args->a1;
	regs->x[2] = args->a2;
	regs->x[3] = args->a3;
	regs->x[4] = args->a4;
	regs->x[5] = args->a5;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

static void thread_resume_from_rpc(struct thread_smc_args *args)
{
	size_t n = args->a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	uint32_t rv = 0;

	assert(l->curr_thread == -1);

	lock_global();

	if (n < CFG_NUM_THREADS &&
	    threads[n].state == THREAD_STATE_SUSPENDED &&
	    args->a7 == threads[n].hyp_clnt_id)
		threads[n].state = THREAD_STATE_ACTIVE;
	else
		rv = OPTEE_SMC_RETURN_ERESUME;

	unlock_global();

	if (rv) {
		args->a0 = rv;
		return;
	}

	l->curr_thread = n;

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/*
	 * A return from an RPC that requested service of a foreign
	 * interrupt must not pick up parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a5(&threads[n].regs, args);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
}

void thread_handle_fast_smc(struct thread_smc_args *args)
{
	thread_check_canaries();
	thread_fast_smc_handler_ptr(args);
	/* Fast handlers must not unmask any exceptions */
	assert(thread_get_exceptions() == THREAD_EXCP_ALL);
}

void thread_handle_std_smc(struct thread_smc_args *args)
{
	thread_check_canaries();

	if (args->a0 == OPTEE_SMC_CALL_RETURN_FROM_RPC)
		thread_resume_from_rpc(args);
	else
		thread_alloc_and_run(args);
}

/*
 * Helper routine for the assembly function thread_std_smc_entry()
 *
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak __thread_std_smc_entry(struct thread_smc_args *args)
{
	thread_std_smc_handler_ptr(args);

	if (args->a0 == OPTEE_SMC_RETURN_OK) {
		struct thread_ctx *thr = threads + thread_get_id();

		tee_fs_rpc_cache_clear(&thr->tsd);
		if (!thread_prealloc_rpc_cache) {
			thread_rpc_free_arg(thr->rpc_carg);
			mobj_free(thr->rpc_mobj);
			thr->rpc_carg = 0;
			thr->rpc_arg = 0;
			thr->rpc_mobj = NULL;
		}
	}
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == -1)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

#ifdef ARM64
bool thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/* If any bit in l->flags is set we're handling some exception. */
	ret = !l->flags;
	thread_unmask_exceptions(exceptions);

	return ret;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	assert(TAILQ_EMPTY(&threads[ct].mutexes));

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

	unlock_global();
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ?
		     thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

	unlock_global();

	return ct;
}

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

int thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

int thread_get_id(void)
{
	int ct = thread_get_id_may_fail();

	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_nintr_handler_ptr = handlers->nintr;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm;
		vaddr_t sp;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		/* Add the area to the pager */
		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					tee_mm_get_bytes(mm) - SMALL_PAGE_SIZE,
					TEE_MATTR_PRW | TEE_MATTR_LOCKED,
					NULL, NULL);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/

static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	/*
	 * When transitioning to EL0, subtract this much from SP to point
	 * into this special kdata page instead. SP is restored by adding
	 * the same amount when transitioning back to EL1.
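	 * (thread_user_kdata_sp_offset computed below is thus the distance
	 * from the user-space alias of the kdata page up to the real
	 * thread_core_local[] array.)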
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(const struct thread_handlers *handlers)
{
	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_thread_stacks();
	pgt_init();

	init_user_kcode();
}

static void init_sec_mon(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_tmp[pos]));
#endif
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

#ifdef ARM64
static bool probe_workaround_available(void)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001)	/* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0);
	return r >= 0;
}

static vaddr_t select_vector(vaddr_t a)
{
	if (probe_workaround_available()) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return a;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t select_vector(vaddr_t a)
{
	return a;
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;

	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	case CORTEX_A57_PART_NUM:
	case CORTEX_A72_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround);
#ifdef ARM32
	case CORTEX_A15_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround_a15);
#endif
	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar(get_excp_vect());
}

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
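	/*
	 * Record the caller's preference in the thread flags. When enable
	 * is true, THREAD_EXCP_FOREIGN_INTR is also unmasked right away;
	 * otherwise foreign interrupts stay masked. The recorded preference
	 * is re-applied by thread_restore_foreign_intr() below after the
	 * interrupts have been temporarily masked.
	 */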
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |=
			THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
			~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either while processing a syscall or
		 * while handling an abort taken during syscall processing.
		 */
		vfp_lazy_save_state_final(&tuv->vfp);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_spsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s =
		    read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr;

	tee_ta_update_session_utime_resume();

	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}
	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
					spsr, exit_status0, exit_status1);
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram;
	*offset = thread_user_kcode_va - CFG_TEE_RAM_START;
	*sz = thread_user_kcode_size;
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram;
	*offset = (vaddr_t)thread_user_kdata_page - CFG_TEE_RAM_START;
	*sz = sizeof(thread_user_kdata_page);
}
#endif

void thread_add_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == MUTEX_OWNER_ID_NONE);
	m->owner_id = ct;
	TAILQ_INSERT_TAIL(&threads[ct].mutexes, m, link);
}

void thread_rem_mutex(struct mutex *m)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1 && threads[ct].state == THREAD_STATE_ACTIVE);
	assert(m->owner_id == ct);
	m->owner_id = MUTEX_OWNER_ID_NONE;
	TAILQ_REMOVE(&threads[ct].mutexes, m, link);
}

bool thread_disable_prealloc_rpc_cache(uint64_t *cookie)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].rpc_arg) {
			mobj_free(threads[n].rpc_mobj);
			*cookie = threads[n].rpc_carg;
			threads[n].rpc_carg = 0;
			threads[n].rpc_arg = NULL;
			goto out;
		}
	}

	*cookie = 0;
	thread_prealloc_rpc_cache = false;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

bool thread_enable_prealloc_rpc_cache(void)
{
	bool rv;
	size_t n;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE) {
			rv = false;
			goto out;
		}
	}

	rv = true;
	thread_prealloc_rpc_cache = true;
out:
	unlock_global();
	thread_unmask_exceptions(exceptions);
	return rv;
}

void thread_rpc_free_arg(uint64_t cookie)
{
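	/*
	 * Ask normal world to free the argument buffer identified by
	 * cookie. A zero cookie means nothing was allocated, so no RPC is
	 * issued.
	 */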
	if (cookie) {
		uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
			OPTEE_SMC_RETURN_RPC_FREE
		};

		reg_pair_from_64(cookie, rpc_args + 1, rpc_args + 2);
		thread_rpc(rpc_args);
	}
}

struct mobj *thread_rpc_alloc_arg(size_t size, uint64_t *cookie)
{
	paddr_t pa;
	uint64_t co;
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = {
		OPTEE_SMC_RETURN_RPC_ALLOC, size
	};
	struct mobj *mobj = NULL;

	thread_rpc(rpc_args);

	pa = reg_pair_to_64(rpc_args[1], rpc_args[2]);
	co = reg_pair_to_64(rpc_args[4], rpc_args[5]);

	if (!ALIGNMENT_IS_OK(pa, struct optee_msg_arg))
		goto err;

	/* Check if this region is in static shared space */
	if (core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size))
		mobj = mobj_shm_alloc(pa, size);
	else if ((!(pa & SMALL_PAGE_MASK)) && size <= SMALL_PAGE_SIZE)
		mobj = mobj_mapped_shm_alloc(&pa, 1, 0, co);

	if (!mobj)
		goto err;

	*cookie = co;
	return mobj;
err:
	thread_rpc_free_arg(co);
	mobj_free(mobj);
	*cookie = 0;
	return NULL;
}

static bool get_rpc_arg(uint32_t cmd, size_t num_params,
			struct optee_msg_arg **arg_ret, uint64_t *carg_ret)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct optee_msg_arg *arg = thr->rpc_arg;
	struct mobj *mobj;
	size_t sz = OPTEE_MSG_GET_ARG_SIZE(THREAD_RPC_MAX_NUM_PARAMS);
	uint64_t c;

	if (num_params > THREAD_RPC_MAX_NUM_PARAMS)
		return false;

	if (!arg) {
		mobj = thread_rpc_alloc_arg(sz, &c);
		if (!mobj)
			return false;

		arg = mobj_get_va(mobj, 0);
		if (!arg)
			goto bad;

		thr->rpc_arg = arg;
		thr->rpc_carg = c;
		thr->rpc_mobj = mobj;
	}

	memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	arg->cmd = cmd;
	arg->num_params = num_params;
	arg->ret = TEE_ERROR_GENERIC; /* in case value isn't updated */

	*arg_ret = arg;
	*carg_ret = thr->rpc_carg;
	return true;

bad:
	thread_rpc_free_arg(c);
	return false;
}

uint32_t thread_rpc_cmd(uint32_t cmd, size_t num_params,
			struct optee_msg_param *params)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	size_t n;

	/*
	 * Break recursion in case plat_prng_add_jitter_entropy_norpc()
	 * sleeps on a mutex or unlocks a mutex with a sleeper (contended
	 * mutex).
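	 * The wait queue RPC is what the mutex implementation itself uses
	 * to sleep and wake threads, so adding jitter entropy while issuing
	 * it could recurse back into thread_rpc_cmd().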
	 */
	if (cmd != OPTEE_MSG_RPC_CMD_WAIT_QUEUE)
		plat_prng_add_jitter_entropy_norpc();

	if (!get_rpc_arg(cmd, num_params, &arg, &carg))
		return TEE_ERROR_OUT_OF_MEMORY;

	memcpy(arg->params, params, sizeof(*params) * num_params);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
	for (n = 0; n < num_params; n++) {
		switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) {
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			params[n] = arg->params[n];
			break;
		default:
			break;
		}
	}
	return arg->ret;
}

/**
 * Free physical memory previously allocated with thread_rpc_alloc()
 *
 * @cookie:	cookie received when allocating the buffer
 * @bt:		must be the same as supplied when allocating
 * @mobj:	mobj that describes the allocated buffer
 *
 * This function also frees the corresponding mobj.
 */
static void thread_rpc_free(unsigned int bt, uint64_t cookie, struct mobj *mobj)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &arg, &carg))
		return;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = cookie;
	arg->params[0].u.value.c = 0;

	mobj_free(mobj);

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);
}

/**
 * Allocates a shared memory buffer via RPC
 *
 * @size:	size in bytes of shared memory buffer
 * @align:	required alignment of buffer
 * @bt:		buffer type OPTEE_MSG_RPC_SHM_TYPE_*
 * @cookie:	returned cookie used when freeing the buffer
 *
 * Returns a mobj describing the allocated buffer, or NULL if the
 * allocation failed.
 */
static struct mobj *thread_rpc_alloc(size_t size, size_t align, unsigned int bt,
				     uint64_t *cookie)
{
	uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_CMD };
	struct optee_msg_arg *arg;
	uint64_t carg;
	struct mobj *mobj = NULL;

	if (!get_rpc_arg(OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &arg, &carg))
		goto fail;

	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
	arg->params[0].u.value.a = bt;
	arg->params[0].u.value.b = size;
	arg->params[0].u.value.c = align;

	reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2);
	thread_rpc(rpc_args);

	if (arg->ret != TEE_SUCCESS)
		goto fail;

	if (arg->num_params != 1)
		goto fail;

	if (arg->params[0].attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT) {
		*cookie = arg->params[0].u.tmem.shm_ref;
		mobj = mobj_shm_alloc(arg->params[0].u.tmem.buf_ptr,
				      arg->params[0].u.tmem.size);
	} else if (arg->params[0].attr == (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
					   OPTEE_MSG_ATTR_NONCONTIG)) {
		*cookie = arg->params[0].u.tmem.shm_ref;
		mobj = msg_param_mobj_from_noncontig(
			arg->params[0].u.tmem.buf_ptr,
			arg->params[0].u.tmem.size,
			*cookie,
			true);
	} else
		goto fail;

	if (!mobj)
		goto free_first;

	assert(mobj_is_nonsec(mobj));
	return mobj;

free_first:
	thread_rpc_free(bt, *cookie, mobj);
fail:
	*cookie = 0;
	return NULL;
}

struct mobj *thread_rpc_alloc_payload(size_t size, uint64_t *cookie)
{
	return thread_rpc_alloc(size, 8, OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie);
}

void thread_rpc_free_payload(uint64_t cookie, struct mobj *mobj)
{
	thread_rpc_free(OPTEE_MSG_RPC_SHM_TYPE_APPL, cookie, mobj);
}
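
/*
 * Usage sketch (illustrative only, not compiled): how a caller is expected
 * to pair the payload helpers above. Error handling and actual use of the
 * buffer are the caller's responsibility; "sz" is just a placeholder size.
 *
 *	uint64_t cookie = 0;
 *	struct mobj *mobj = thread_rpc_alloc_payload(sz, &cookie);
 *
 *	if (mobj) {
 *		void *va = mobj_get_va(mobj, 0);
 *
 *		... read/write up to sz bytes at va ...
 *		thread_rpc_free_payload(cookie, mobj);
 *	}
 */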