1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016, Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 * Copyright (c) 2020, Arm Limited 6 */ 7 8 #include <platform_config.h> 9 10 #include <arm.h> 11 #include <assert.h> 12 #include <config.h> 13 #include <io.h> 14 #include <keep.h> 15 #include <kernel/asan.h> 16 #include <kernel/boot.h> 17 #include <kernel/linker.h> 18 #include <kernel/lockdep.h> 19 #include <kernel/misc.h> 20 #include <kernel/panic.h> 21 #include <kernel/spinlock.h> 22 #include <kernel/tee_ta_manager.h> 23 #include <kernel/thread_defs.h> 24 #include <kernel/thread.h> 25 #include <kernel/user_mode_ctx_struct.h> 26 #include <kernel/virtualization.h> 27 #include <mm/core_memprot.h> 28 #include <mm/mobj.h> 29 #include <mm/tee_mm.h> 30 #include <mm/tee_pager.h> 31 #include <mm/vm.h> 32 #include <smccc.h> 33 #include <sm/sm.h> 34 #include <trace.h> 35 #include <util.h> 36 37 #include "thread_private.h" 38 39 struct thread_ctx threads[CFG_NUM_THREADS]; 40 41 struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss; 42 43 /* 44 * Stacks 45 * 46 * [Lower addresses on the left] 47 * 48 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ] 49 * ^ ^ ^ ^ 50 * stack_xxx[n] "hard" top "soft" top bottom 51 */ 52 53 #ifdef CFG_WITH_ARM_TRUSTED_FW 54 #define STACK_TMP_OFFS 0 55 #else 56 #define STACK_TMP_OFFS SM_STACK_TMP_RESERVE_SIZE 57 #endif 58 59 #ifdef ARM32 60 #ifdef CFG_CORE_SANITIZE_KADDRESS 61 #define STACK_TMP_SIZE (3072 + STACK_TMP_OFFS) 62 #else 63 #define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS) 64 #endif 65 #define STACK_THREAD_SIZE 8192 66 67 #if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(__clang__) 68 #define STACK_ABT_SIZE 3072 69 #else 70 #define STACK_ABT_SIZE 2048 71 #endif 72 73 #endif /*ARM32*/ 74 75 #ifdef ARM64 76 #if defined(__clang__) && !defined(__OPTIMIZE_SIZE__) 77 #define STACK_TMP_SIZE (4096 + STACK_TMP_OFFS) 78 #else 79 #define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS) 80 #endif 81 #define STACK_THREAD_SIZE 8192 82 83 #if TRACE_LEVEL > 0 84 #define STACK_ABT_SIZE 3072 85 #else 86 #define STACK_ABT_SIZE 1024 87 #endif 88 #endif /*ARM64*/ 89 90 #ifdef CFG_WITH_STACK_CANARIES 91 #ifdef ARM32 92 #define STACK_CANARY_SIZE (4 * sizeof(uint32_t)) 93 #endif 94 #ifdef ARM64 95 #define STACK_CANARY_SIZE (8 * sizeof(uint32_t)) 96 #endif 97 #define START_CANARY_VALUE 0xdededede 98 #define END_CANARY_VALUE 0xabababab 99 #define GET_START_CANARY(name, stack_num) name[stack_num][0] 100 #define GET_END_CANARY(name, stack_num) \ 101 name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1] 102 #else 103 #define STACK_CANARY_SIZE 0 104 #endif 105 106 #ifdef CFG_CORE_DEBUG_CHECK_STACKS 107 /* 108 * Extra space added to each stack in order to reliably detect and dump stack 109 * overflows. Should cover the maximum expected overflow size caused by any C 110 * function (say, 512 bytes; no function should have that much local variables), 111 * plus the maximum stack space needed by __cyg_profile_func_exit(): about 1 KB, 112 * a large part of which is used to print the call stack. Total: 1.5 KB. 113 */ 114 #define STACK_CHECK_EXTRA 1536 115 #else 116 #define STACK_CHECK_EXTRA 0 117 #endif 118 119 #define DECLARE_STACK(name, num_stacks, stack_size, linkage) \ 120 linkage uint32_t name[num_stacks] \ 121 [ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \ 122 STACK_ALIGNMENT) / sizeof(uint32_t)] \ 123 __attribute__((section(".nozi_stack." # name), \ 124 aligned(STACK_ALIGNMENT))) 125 126 #define GET_STACK(stack) ((vaddr_t)(stack) + STACK_SIZE(stack)) 127 128 DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, 129 STACK_TMP_SIZE + CFG_STACK_TMP_EXTRA, static); 130 DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static); 131 #ifndef CFG_WITH_PAGER 132 DECLARE_STACK(stack_thread, CFG_NUM_THREADS, 133 STACK_THREAD_SIZE + CFG_STACK_THREAD_EXTRA, static); 134 #endif 135 136 #define GET_STACK_TOP_HARD(stack, n) \ 137 ((vaddr_t)&(stack)[n] + STACK_CANARY_SIZE / 2) 138 #define GET_STACK_TOP_SOFT(stack, n) \ 139 (GET_STACK_TOP_HARD(stack, n) + STACK_CHECK_EXTRA) 140 #define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \ 141 STACK_CANARY_SIZE / 2) 142 143 const void *stack_tmp_export __section(".identity_map.stack_tmp_export") = 144 (void *)(GET_STACK_BOTTOM(stack_tmp, 0) - STACK_TMP_OFFS); 145 const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") = 146 sizeof(stack_tmp[0]); 147 148 /* 149 * These stack setup info are required by secondary boot cores before they 150 * each locally enable the pager (the mmu). Hence kept in pager sections. 151 */ 152 DECLARE_KEEP_PAGER(stack_tmp_export); 153 DECLARE_KEEP_PAGER(stack_tmp_stride); 154 155 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0 156 static vaddr_t thread_user_kcode_va __nex_bss; 157 long thread_user_kcode_offset __nex_bss; 158 static size_t thread_user_kcode_size __nex_bss; 159 #endif 160 161 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \ 162 defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64) 163 long thread_user_kdata_sp_offset __nex_bss; 164 static uint8_t thread_user_kdata_page[ 165 ROUNDUP(sizeof(thread_core_local), SMALL_PAGE_SIZE)] 166 __aligned(SMALL_PAGE_SIZE) 167 #ifndef CFG_VIRTUALIZATION 168 __section(".nozi.kdata_page"); 169 #else 170 __section(".nex_nozi.kdata_page"); 171 #endif 172 #endif 173 174 static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK; 175 176 static void init_canaries(void) 177 { 178 #ifdef CFG_WITH_STACK_CANARIES 179 size_t n; 180 #define INIT_CANARY(name) \ 181 for (n = 0; n < ARRAY_SIZE(name); n++) { \ 182 uint32_t *start_canary = &GET_START_CANARY(name, n); \ 183 uint32_t *end_canary = &GET_END_CANARY(name, n); \ 184 \ 185 *start_canary = START_CANARY_VALUE; \ 186 *end_canary = END_CANARY_VALUE; \ 187 } 188 189 INIT_CANARY(stack_tmp); 190 INIT_CANARY(stack_abt); 191 #if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION) 192 INIT_CANARY(stack_thread); 193 #endif 194 #endif/*CFG_WITH_STACK_CANARIES*/ 195 } 196 197 #define CANARY_DIED(stack, loc, n, addr) \ 198 do { \ 199 EMSG_RAW("Dead canary at %s of '%s[%zu]' (%p)", #loc, #stack, \ 200 n, (void *)addr); \ 201 panic(); \ 202 } while (0) 203 204 void thread_check_canaries(void) 205 { 206 #ifdef CFG_WITH_STACK_CANARIES 207 uint32_t *canary = NULL; 208 size_t n = 0; 209 210 for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) { 211 canary = &GET_START_CANARY(stack_tmp, n); 212 if (*canary != START_CANARY_VALUE) 213 CANARY_DIED(stack_tmp, start, n, canary); 214 canary = &GET_END_CANARY(stack_tmp, n); 215 if (*canary != END_CANARY_VALUE) 216 CANARY_DIED(stack_tmp, end, n, canary); 217 } 218 219 for (n = 0; n < ARRAY_SIZE(stack_abt); n++) { 220 canary = &GET_START_CANARY(stack_abt, n); 221 if (*canary != START_CANARY_VALUE) 222 CANARY_DIED(stack_abt, start, n, canary); 223 canary = &GET_END_CANARY(stack_abt, n); 224 if (*canary != END_CANARY_VALUE) 225 CANARY_DIED(stack_abt, end, n, canary); 226 227 } 228 #if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION) 229 for (n = 0; n < ARRAY_SIZE(stack_thread); n++) { 230 canary = &GET_START_CANARY(stack_thread, n); 231 if (*canary != START_CANARY_VALUE) 232 CANARY_DIED(stack_thread, start, n, canary); 233 canary = &GET_END_CANARY(stack_thread, n); 234 if (*canary != END_CANARY_VALUE) 235 CANARY_DIED(stack_thread, end, n, canary); 236 } 237 #endif 238 #endif/*CFG_WITH_STACK_CANARIES*/ 239 } 240 241 void thread_lock_global(void) 242 { 243 cpu_spin_lock(&thread_global_lock); 244 } 245 246 void thread_unlock_global(void) 247 { 248 cpu_spin_unlock(&thread_global_lock); 249 } 250 251 #ifdef ARM32 252 uint32_t __nostackcheck thread_get_exceptions(void) 253 { 254 uint32_t cpsr = read_cpsr(); 255 256 return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL; 257 } 258 259 void __nostackcheck thread_set_exceptions(uint32_t exceptions) 260 { 261 uint32_t cpsr = read_cpsr(); 262 263 /* Foreign interrupts must not be unmasked while holding a spinlock */ 264 if (!(exceptions & THREAD_EXCP_FOREIGN_INTR)) 265 assert_have_no_spinlock(); 266 267 cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT); 268 cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT); 269 270 barrier(); 271 write_cpsr(cpsr); 272 barrier(); 273 } 274 #endif /*ARM32*/ 275 276 #ifdef ARM64 277 uint32_t __nostackcheck thread_get_exceptions(void) 278 { 279 uint32_t daif = read_daif(); 280 281 return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL; 282 } 283 284 void __nostackcheck thread_set_exceptions(uint32_t exceptions) 285 { 286 uint32_t daif = read_daif(); 287 288 /* Foreign interrupts must not be unmasked while holding a spinlock */ 289 if (!(exceptions & THREAD_EXCP_FOREIGN_INTR)) 290 assert_have_no_spinlock(); 291 292 daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT); 293 daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT); 294 295 barrier(); 296 write_daif(daif); 297 barrier(); 298 } 299 #endif /*ARM64*/ 300 301 uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions) 302 { 303 uint32_t state = thread_get_exceptions(); 304 305 thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL)); 306 return state; 307 } 308 309 void __nostackcheck thread_unmask_exceptions(uint32_t state) 310 { 311 thread_set_exceptions(state & THREAD_EXCP_ALL); 312 } 313 314 315 static struct thread_core_local * __nostackcheck 316 get_core_local(unsigned int pos) 317 { 318 /* 319 * Foreign interrupts must be disabled before playing with core_local 320 * since we otherwise may be rescheduled to a different core in the 321 * middle of this function. 322 */ 323 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 324 325 assert(pos < CFG_TEE_CORE_NB_CORE); 326 return &thread_core_local[pos]; 327 } 328 329 struct thread_core_local * __nostackcheck thread_get_core_local(void) 330 { 331 unsigned int pos = get_core_pos(); 332 333 return get_core_local(pos); 334 } 335 336 #ifdef CFG_CORE_DEBUG_CHECK_STACKS 337 static void print_stack_limits(void) 338 { 339 size_t n = 0; 340 vaddr_t __maybe_unused start = 0; 341 vaddr_t __maybe_unused end = 0; 342 343 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { 344 start = GET_STACK_TOP_SOFT(stack_tmp, n); 345 end = GET_STACK_BOTTOM(stack_tmp, n); 346 DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); 347 } 348 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { 349 start = GET_STACK_TOP_SOFT(stack_abt, n); 350 end = GET_STACK_BOTTOM(stack_abt, n); 351 DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); 352 } 353 for (n = 0; n < CFG_NUM_THREADS; n++) { 354 end = threads[n].stack_va_end; 355 start = end - STACK_THREAD_SIZE; 356 DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); 357 } 358 } 359 360 static void check_stack_limits(void) 361 { 362 vaddr_t stack_start = 0; 363 vaddr_t stack_end = 0; 364 /* Any value in the current stack frame will do */ 365 vaddr_t current_sp = (vaddr_t)&stack_start; 366 367 if (!get_stack_soft_limits(&stack_start, &stack_end)) 368 panic("Unknown stack limits"); 369 if (current_sp < stack_start || current_sp > stack_end) { 370 DMSG("Stack pointer out of range (0x%" PRIxVA ")", current_sp); 371 print_stack_limits(); 372 panic(); 373 } 374 } 375 376 static bool * __nostackcheck get_stackcheck_recursion_flag(void) 377 { 378 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 379 unsigned int pos = get_core_pos(); 380 struct thread_core_local *l = get_core_local(pos); 381 int ct = l->curr_thread; 382 bool *p = NULL; 383 384 if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP)) 385 p = &l->stackcheck_recursion; 386 else if (!l->flags) 387 p = &threads[ct].tsd.stackcheck_recursion; 388 389 thread_unmask_exceptions(exceptions); 390 return p; 391 } 392 393 void __cyg_profile_func_enter(void *this_fn, void *call_site); 394 void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused, 395 void *call_site __unused) 396 { 397 bool *p = get_stackcheck_recursion_flag(); 398 399 assert(p); 400 if (*p) 401 return; 402 *p = true; 403 check_stack_limits(); 404 *p = false; 405 } 406 407 void __cyg_profile_func_exit(void *this_fn, void *call_site); 408 void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused, 409 void *call_site __unused) 410 { 411 } 412 #else 413 static void print_stack_limits(void) 414 { 415 } 416 #endif 417 418 static void thread_lazy_save_ns_vfp(void) 419 { 420 #ifdef CFG_WITH_VFP 421 struct thread_ctx *thr = threads + thread_get_id(); 422 423 thr->vfp_state.ns_saved = false; 424 vfp_lazy_save_state_init(&thr->vfp_state.ns); 425 #endif /*CFG_WITH_VFP*/ 426 } 427 428 static void thread_lazy_restore_ns_vfp(void) 429 { 430 #ifdef CFG_WITH_VFP 431 struct thread_ctx *thr = threads + thread_get_id(); 432 struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp; 433 434 assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved); 435 436 if (tuv && tuv->lazy_saved && !tuv->saved) { 437 vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/); 438 tuv->saved = true; 439 } 440 441 vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved); 442 thr->vfp_state.ns_saved = false; 443 #endif /*CFG_WITH_VFP*/ 444 } 445 446 #ifdef ARM32 447 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1, 448 uint32_t a2, uint32_t a3) 449 { 450 thread->regs.pc = (uint32_t)thread_std_smc_entry; 451 452 /* 453 * Stdcalls starts in SVC mode with masked foreign interrupts, masked 454 * Asynchronous abort and unmasked native interrupts. 455 */ 456 thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E; 457 thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A | 458 (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT); 459 /* Enable thumb mode if it's a thumb instruction */ 460 if (thread->regs.pc & 1) 461 thread->regs.cpsr |= CPSR_T; 462 /* Reinitialize stack pointer */ 463 thread->regs.svc_sp = thread->stack_va_end; 464 465 /* 466 * Copy arguments into context. This will make the 467 * arguments appear in r0-r7 when thread is started. 468 */ 469 thread->regs.r0 = a0; 470 thread->regs.r1 = a1; 471 thread->regs.r2 = a2; 472 thread->regs.r3 = a3; 473 thread->regs.r4 = 0; 474 thread->regs.r5 = 0; 475 thread->regs.r6 = 0; 476 thread->regs.r7 = 0; 477 } 478 #endif /*ARM32*/ 479 480 #ifdef ARM64 481 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1, 482 uint32_t a2, uint32_t a3) 483 { 484 thread->regs.pc = (uint64_t)thread_std_smc_entry; 485 486 /* 487 * Stdcalls starts in SVC mode with masked foreign interrupts, masked 488 * Asynchronous abort and unmasked native interrupts. 489 */ 490 thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 491 THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT); 492 /* Reinitialize stack pointer */ 493 thread->regs.sp = thread->stack_va_end; 494 495 /* 496 * Copy arguments into context. This will make the 497 * arguments appear in x0-x7 when thread is started. 498 */ 499 thread->regs.x[0] = a0; 500 thread->regs.x[1] = a1; 501 thread->regs.x[2] = a2; 502 thread->regs.x[3] = a3; 503 thread->regs.x[4] = 0; 504 thread->regs.x[5] = 0; 505 thread->regs.x[6] = 0; 506 thread->regs.x[7] = 0; 507 508 /* Set up frame pointer as per the Aarch64 AAPCS */ 509 thread->regs.x[29] = 0; 510 } 511 #endif /*ARM64*/ 512 513 void thread_init_boot_thread(void) 514 { 515 struct thread_core_local *l = thread_get_core_local(); 516 517 thread_init_threads(); 518 519 l->curr_thread = 0; 520 threads[0].state = THREAD_STATE_ACTIVE; 521 } 522 523 void __nostackcheck thread_clr_boot_thread(void) 524 { 525 struct thread_core_local *l = thread_get_core_local(); 526 527 assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS); 528 assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE); 529 threads[l->curr_thread].state = THREAD_STATE_FREE; 530 l->curr_thread = -1; 531 } 532 533 void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3) 534 { 535 size_t n; 536 struct thread_core_local *l = thread_get_core_local(); 537 bool found_thread = false; 538 539 assert(l->curr_thread == -1); 540 541 thread_lock_global(); 542 543 for (n = 0; n < CFG_NUM_THREADS; n++) { 544 if (threads[n].state == THREAD_STATE_FREE) { 545 threads[n].state = THREAD_STATE_ACTIVE; 546 found_thread = true; 547 break; 548 } 549 } 550 551 thread_unlock_global(); 552 553 if (!found_thread) 554 return; 555 556 l->curr_thread = n; 557 558 threads[n].flags = 0; 559 init_regs(threads + n, a0, a1, a2, a3); 560 561 thread_lazy_save_ns_vfp(); 562 563 l->flags &= ~THREAD_CLF_TMP; 564 thread_resume(&threads[n].regs); 565 /*NOTREACHED*/ 566 panic(); 567 } 568 569 #ifdef ARM32 570 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0, 571 uint32_t a1, uint32_t a2, uint32_t a3) 572 { 573 /* 574 * Update returned values from RPC, values will appear in 575 * r0-r3 when thread is resumed. 576 */ 577 regs->r0 = a0; 578 regs->r1 = a1; 579 regs->r2 = a2; 580 regs->r3 = a3; 581 } 582 #endif /*ARM32*/ 583 584 #ifdef ARM64 585 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0, 586 uint32_t a1, uint32_t a2, uint32_t a3) 587 { 588 /* 589 * Update returned values from RPC, values will appear in 590 * x0-x3 when thread is resumed. 591 */ 592 regs->x[0] = a0; 593 regs->x[1] = a1; 594 regs->x[2] = a2; 595 regs->x[3] = a3; 596 } 597 #endif /*ARM64*/ 598 599 #ifdef ARM32 600 static bool is_from_user(uint32_t cpsr) 601 { 602 return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR; 603 } 604 #endif 605 606 #ifdef ARM64 607 static bool is_from_user(uint32_t cpsr) 608 { 609 if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT)) 610 return true; 611 if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) == 612 SPSR_64_MODE_EL0) 613 return true; 614 return false; 615 } 616 #endif 617 618 #ifdef CFG_SYSCALL_FTRACE 619 static void __noprof ftrace_suspend(void) 620 { 621 struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack); 622 623 if (s && s->fbuf) 624 s->fbuf->syscall_trace_suspended = true; 625 } 626 627 static void __noprof ftrace_resume(void) 628 { 629 struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack); 630 631 if (s && s->fbuf) 632 s->fbuf->syscall_trace_suspended = false; 633 } 634 #else 635 static void __noprof ftrace_suspend(void) 636 { 637 } 638 639 static void __noprof ftrace_resume(void) 640 { 641 } 642 #endif 643 644 static bool is_user_mode(struct thread_ctx_regs *regs) 645 { 646 return is_from_user((uint32_t)regs->cpsr); 647 } 648 649 void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1, 650 uint32_t a2, uint32_t a3) 651 { 652 size_t n = thread_id; 653 struct thread_core_local *l = thread_get_core_local(); 654 bool found_thread = false; 655 656 assert(l->curr_thread == -1); 657 658 thread_lock_global(); 659 660 if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) { 661 threads[n].state = THREAD_STATE_ACTIVE; 662 found_thread = true; 663 } 664 665 thread_unlock_global(); 666 667 if (!found_thread) 668 return; 669 670 l->curr_thread = n; 671 672 if (threads[n].have_user_map) { 673 core_mmu_set_user_map(&threads[n].user_map); 674 if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR) 675 tee_ta_ftrace_update_times_resume(); 676 } 677 678 if (is_user_mode(&threads[n].regs)) 679 tee_ta_update_session_utime_resume(); 680 681 /* 682 * Return from RPC to request service of a foreign interrupt must not 683 * get parameters from non-secure world. 684 */ 685 if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) { 686 copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3); 687 threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN; 688 } 689 690 thread_lazy_save_ns_vfp(); 691 692 if (threads[n].have_user_map) 693 ftrace_resume(); 694 695 l->flags &= ~THREAD_CLF_TMP; 696 thread_resume(&threads[n].regs); 697 /*NOTREACHED*/ 698 panic(); 699 } 700 701 void __nostackcheck *thread_get_tmp_sp(void) 702 { 703 struct thread_core_local *l = thread_get_core_local(); 704 705 /* 706 * Called from assembly when switching to the temporary stack, so flags 707 * need updating 708 */ 709 l->flags |= THREAD_CLF_TMP; 710 711 return (void *)l->tmp_stack_va_end; 712 } 713 714 #ifdef ARM64 715 vaddr_t thread_get_saved_thread_sp(void) 716 { 717 struct thread_core_local *l = thread_get_core_local(); 718 int ct = l->curr_thread; 719 720 assert(ct != -1); 721 return threads[ct].kern_sp; 722 } 723 #endif /*ARM64*/ 724 725 vaddr_t thread_stack_start(void) 726 { 727 struct thread_ctx *thr; 728 int ct = thread_get_id_may_fail(); 729 730 if (ct == -1) 731 return 0; 732 733 thr = threads + ct; 734 return thr->stack_va_end - STACK_THREAD_SIZE; 735 } 736 737 size_t thread_stack_size(void) 738 { 739 return STACK_THREAD_SIZE; 740 } 741 742 bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard) 743 { 744 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 745 unsigned int pos = get_core_pos(); 746 struct thread_core_local *l = get_core_local(pos); 747 int ct = l->curr_thread; 748 bool ret = false; 749 750 if (l->flags & THREAD_CLF_TMP) { 751 if (hard) 752 *start = GET_STACK_TOP_HARD(stack_tmp, pos); 753 else 754 *start = GET_STACK_TOP_SOFT(stack_tmp, pos); 755 *end = GET_STACK_BOTTOM(stack_tmp, pos); 756 ret = true; 757 } else if (l->flags & THREAD_CLF_ABORT) { 758 if (hard) 759 *start = GET_STACK_TOP_HARD(stack_abt, pos); 760 else 761 *start = GET_STACK_TOP_SOFT(stack_abt, pos); 762 *end = GET_STACK_BOTTOM(stack_abt, pos); 763 ret = true; 764 } else if (!l->flags) { 765 if (ct < 0 || ct >= CFG_NUM_THREADS) 766 goto out; 767 768 *end = threads[ct].stack_va_end; 769 *start = *end - STACK_THREAD_SIZE; 770 if (!hard) 771 *start += STACK_CHECK_EXTRA; 772 ret = true; 773 } 774 out: 775 thread_unmask_exceptions(exceptions); 776 return ret; 777 } 778 779 bool thread_is_from_abort_mode(void) 780 { 781 struct thread_core_local *l = thread_get_core_local(); 782 783 return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT; 784 } 785 786 #ifdef ARM32 787 bool thread_is_in_normal_mode(void) 788 { 789 return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC; 790 } 791 #endif 792 793 #ifdef ARM64 794 bool thread_is_in_normal_mode(void) 795 { 796 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 797 struct thread_core_local *l = thread_get_core_local(); 798 bool ret; 799 800 /* 801 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're 802 * handling some exception. 803 */ 804 ret = (l->curr_thread != -1) && !(l->flags & ~THREAD_CLF_TMP); 805 thread_unmask_exceptions(exceptions); 806 807 return ret; 808 } 809 #endif 810 811 void thread_state_free(void) 812 { 813 struct thread_core_local *l = thread_get_core_local(); 814 int ct = l->curr_thread; 815 816 assert(ct != -1); 817 818 thread_lazy_restore_ns_vfp(); 819 tee_pager_release_phys( 820 (void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE), 821 STACK_THREAD_SIZE); 822 823 thread_lock_global(); 824 825 assert(threads[ct].state == THREAD_STATE_ACTIVE); 826 threads[ct].state = THREAD_STATE_FREE; 827 threads[ct].flags = 0; 828 l->curr_thread = -1; 829 830 #ifdef CFG_VIRTUALIZATION 831 virt_unset_guest(); 832 #endif 833 thread_unlock_global(); 834 } 835 836 #ifdef CFG_WITH_PAGER 837 static void release_unused_kernel_stack(struct thread_ctx *thr, 838 uint32_t cpsr __maybe_unused) 839 { 840 #ifdef ARM64 841 /* 842 * If we're from user mode then thr->regs.sp is the saved user 843 * stack pointer and thr->kern_sp holds the last kernel stack 844 * pointer. But if we're from kernel mode then thr->kern_sp isn't 845 * up to date so we need to read from thr->regs.sp instead. 846 */ 847 vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp; 848 #else 849 vaddr_t sp = thr->regs.svc_sp; 850 #endif 851 vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE; 852 size_t len = sp - base; 853 854 tee_pager_release_phys((void *)base, len); 855 } 856 #else 857 static void release_unused_kernel_stack(struct thread_ctx *thr __unused, 858 uint32_t cpsr __unused) 859 { 860 } 861 #endif 862 863 int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc) 864 { 865 struct thread_core_local *l = thread_get_core_local(); 866 int ct = l->curr_thread; 867 868 assert(ct != -1); 869 870 if (core_mmu_user_mapping_is_active()) 871 ftrace_suspend(); 872 873 thread_check_canaries(); 874 875 release_unused_kernel_stack(threads + ct, cpsr); 876 877 if (is_from_user(cpsr)) { 878 thread_user_save_vfp(); 879 tee_ta_update_session_utime_suspend(); 880 tee_ta_gprof_sample_pc(pc); 881 } 882 thread_lazy_restore_ns_vfp(); 883 884 thread_lock_global(); 885 886 assert(threads[ct].state == THREAD_STATE_ACTIVE); 887 threads[ct].flags |= flags; 888 threads[ct].regs.cpsr = cpsr; 889 threads[ct].regs.pc = pc; 890 threads[ct].state = THREAD_STATE_SUSPENDED; 891 892 threads[ct].have_user_map = core_mmu_user_mapping_is_active(); 893 if (threads[ct].have_user_map) { 894 if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR) 895 tee_ta_ftrace_update_times_suspend(); 896 core_mmu_get_user_map(&threads[ct].user_map); 897 core_mmu_set_user_map(NULL); 898 } 899 900 l->curr_thread = -1; 901 902 #ifdef CFG_VIRTUALIZATION 903 virt_unset_guest(); 904 #endif 905 906 thread_unlock_global(); 907 908 return ct; 909 } 910 911 #ifdef ARM32 912 static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp) 913 { 914 l->tmp_stack_va_end = sp; 915 thread_set_irq_sp(sp); 916 thread_set_fiq_sp(sp); 917 } 918 919 static void set_abt_stack(struct thread_core_local *l, vaddr_t sp) 920 { 921 l->abt_stack_va_end = sp; 922 thread_set_abt_sp((vaddr_t)l); 923 thread_set_und_sp((vaddr_t)l); 924 } 925 #endif /*ARM32*/ 926 927 #ifdef ARM64 928 static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp) 929 { 930 /* 931 * We're already using the tmp stack when this function is called 932 * so there's no need to assign it to any stack pointer. However, 933 * we'll need to restore it at different times so store it here. 934 */ 935 l->tmp_stack_va_end = sp; 936 } 937 938 static void set_abt_stack(struct thread_core_local *l, vaddr_t sp) 939 { 940 l->abt_stack_va_end = sp; 941 } 942 #endif /*ARM64*/ 943 944 bool thread_init_stack(uint32_t thread_id, vaddr_t sp) 945 { 946 if (thread_id >= CFG_NUM_THREADS) 947 return false; 948 threads[thread_id].stack_va_end = sp; 949 return true; 950 } 951 952 short int thread_get_id_may_fail(void) 953 { 954 /* 955 * thread_get_core_local() requires foreign interrupts to be disabled 956 */ 957 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 958 struct thread_core_local *l = thread_get_core_local(); 959 short int ct = l->curr_thread; 960 961 thread_unmask_exceptions(exceptions); 962 return ct; 963 } 964 965 short int thread_get_id(void) 966 { 967 short int ct = thread_get_id_may_fail(); 968 969 /* Thread ID has to fit in a short int */ 970 COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX); 971 assert(ct >= 0 && ct < CFG_NUM_THREADS); 972 return ct; 973 } 974 975 #ifdef CFG_WITH_PAGER 976 static void init_thread_stacks(void) 977 { 978 size_t n = 0; 979 980 /* 981 * Allocate virtual memory for thread stacks. 982 */ 983 for (n = 0; n < CFG_NUM_THREADS; n++) { 984 tee_mm_entry_t *mm = NULL; 985 vaddr_t sp = 0; 986 size_t num_pages = 0; 987 struct fobj *fobj = NULL; 988 989 /* Find vmem for thread stack and its protection gap */ 990 mm = tee_mm_alloc(&tee_mm_vcore, 991 SMALL_PAGE_SIZE + STACK_THREAD_SIZE); 992 assert(mm); 993 994 /* Claim eventual physical page */ 995 tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm), 996 true); 997 998 num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1; 999 fobj = fobj_locked_paged_alloc(num_pages); 1000 1001 /* Add the area to the pager */ 1002 tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE, 1003 PAGER_AREA_TYPE_LOCK, fobj); 1004 fobj_put(fobj); 1005 1006 /* init effective stack */ 1007 sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm); 1008 asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp); 1009 if (!thread_init_stack(n, sp)) 1010 panic("init stack failed"); 1011 } 1012 } 1013 #else 1014 static void init_thread_stacks(void) 1015 { 1016 size_t n; 1017 1018 /* Assign the thread stacks */ 1019 for (n = 0; n < CFG_NUM_THREADS; n++) { 1020 if (!thread_init_stack(n, GET_STACK_BOTTOM(stack_thread, n))) 1021 panic("thread_init_stack failed"); 1022 } 1023 } 1024 #endif /*CFG_WITH_PAGER*/ 1025 1026 static void init_user_kcode(void) 1027 { 1028 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0 1029 vaddr_t v = (vaddr_t)thread_excp_vect; 1030 vaddr_t ve = (vaddr_t)thread_excp_vect_end; 1031 1032 thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE); 1033 ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE); 1034 thread_user_kcode_size = ve - thread_user_kcode_va; 1035 1036 core_mmu_get_user_va_range(&v, NULL); 1037 thread_user_kcode_offset = thread_user_kcode_va - v; 1038 1039 #if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64) 1040 /* 1041 * When transitioning to EL0 subtract SP with this much to point to 1042 * this special kdata page instead. SP is restored by add this much 1043 * while transitioning back to EL1. 1044 */ 1045 v += thread_user_kcode_size; 1046 thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v; 1047 #endif 1048 #endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/ 1049 } 1050 1051 void thread_init_threads(void) 1052 { 1053 size_t n = 0; 1054 1055 init_thread_stacks(); 1056 print_stack_limits(); 1057 pgt_init(); 1058 1059 mutex_lockdep_init(); 1060 1061 for (n = 0; n < CFG_NUM_THREADS; n++) { 1062 TAILQ_INIT(&threads[n].tsd.sess_stack); 1063 SLIST_INIT(&threads[n].tsd.pgt_cache); 1064 } 1065 } 1066 1067 void __nostackcheck thread_init_thread_core_local(void) 1068 { 1069 size_t n = 0; 1070 struct thread_core_local *tcl = thread_core_local; 1071 1072 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { 1073 tcl[n].curr_thread = -1; 1074 tcl[n].flags = THREAD_CLF_TMP; 1075 } 1076 1077 tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0); 1078 } 1079 1080 void thread_init_primary(void) 1081 { 1082 /* Initialize canaries around the stacks */ 1083 init_canaries(); 1084 1085 init_user_kcode(); 1086 } 1087 1088 static void init_sec_mon_stack(size_t pos __maybe_unused) 1089 { 1090 #if !defined(CFG_WITH_ARM_TRUSTED_FW) 1091 /* Initialize secure monitor */ 1092 sm_init(GET_STACK_BOTTOM(stack_tmp, pos)); 1093 #endif 1094 } 1095 1096 static uint32_t __maybe_unused get_midr_implementer(uint32_t midr) 1097 { 1098 return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK; 1099 } 1100 1101 static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr) 1102 { 1103 return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) & 1104 MIDR_PRIMARY_PART_NUM_MASK; 1105 } 1106 1107 #ifdef ARM64 1108 static bool probe_workaround_available(void) 1109 { 1110 int32_t r; 1111 1112 r = thread_smc(SMCCC_VERSION, 0, 0, 0); 1113 if (r < 0) 1114 return false; 1115 if (r < 0x10001) /* compare with version 1.1 */ 1116 return false; 1117 1118 /* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */ 1119 r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0); 1120 return r >= 0; 1121 } 1122 1123 static vaddr_t __maybe_unused select_vector(vaddr_t a) 1124 { 1125 if (probe_workaround_available()) { 1126 DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available", 1127 SMCCC_ARCH_WORKAROUND_1); 1128 DMSG("SMC Workaround for CVE-2017-5715 used"); 1129 return a; 1130 } 1131 1132 DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable", 1133 SMCCC_ARCH_WORKAROUND_1); 1134 DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)"); 1135 return (vaddr_t)thread_excp_vect; 1136 } 1137 #else 1138 static vaddr_t __maybe_unused select_vector(vaddr_t a) 1139 { 1140 return a; 1141 } 1142 #endif 1143 1144 static vaddr_t get_excp_vect(void) 1145 { 1146 #ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC 1147 uint32_t midr = read_midr(); 1148 1149 if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM) 1150 return (vaddr_t)thread_excp_vect; 1151 1152 switch (get_midr_primary_part(midr)) { 1153 #ifdef ARM32 1154 case CORTEX_A8_PART_NUM: 1155 case CORTEX_A9_PART_NUM: 1156 case CORTEX_A17_PART_NUM: 1157 #endif 1158 case CORTEX_A57_PART_NUM: 1159 case CORTEX_A72_PART_NUM: 1160 case CORTEX_A73_PART_NUM: 1161 case CORTEX_A75_PART_NUM: 1162 return select_vector((vaddr_t)thread_excp_vect_workaround); 1163 #ifdef ARM32 1164 case CORTEX_A15_PART_NUM: 1165 return select_vector((vaddr_t)thread_excp_vect_workaround_a15); 1166 #endif 1167 default: 1168 return (vaddr_t)thread_excp_vect; 1169 } 1170 #endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/ 1171 1172 return (vaddr_t)thread_excp_vect; 1173 } 1174 1175 void thread_init_per_cpu(void) 1176 { 1177 size_t pos = get_core_pos(); 1178 struct thread_core_local *l = thread_get_core_local(); 1179 1180 init_sec_mon_stack(pos); 1181 1182 set_tmp_stack(l, GET_STACK_BOTTOM(stack_tmp, pos) - STACK_TMP_OFFS); 1183 set_abt_stack(l, GET_STACK_BOTTOM(stack_abt, pos)); 1184 1185 thread_init_vbar(get_excp_vect()); 1186 1187 #ifdef CFG_FTRACE_SUPPORT 1188 /* 1189 * Enable accesses to frequency register and physical counter 1190 * register in EL0/PL0 required for timestamping during 1191 * function tracing. 1192 */ 1193 write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN); 1194 #endif 1195 } 1196 1197 struct thread_specific_data *thread_get_tsd(void) 1198 { 1199 return &threads[thread_get_id()].tsd; 1200 } 1201 1202 struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void) 1203 { 1204 struct thread_core_local *l = thread_get_core_local(); 1205 1206 assert(l->curr_thread != -1); 1207 return &threads[l->curr_thread].regs; 1208 } 1209 1210 void thread_set_foreign_intr(bool enable) 1211 { 1212 /* thread_get_core_local() requires foreign interrupts to be disabled */ 1213 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 1214 struct thread_core_local *l; 1215 1216 l = thread_get_core_local(); 1217 1218 assert(l->curr_thread != -1); 1219 1220 if (enable) { 1221 threads[l->curr_thread].flags |= 1222 THREAD_FLAGS_FOREIGN_INTR_ENABLE; 1223 thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 1224 } else { 1225 /* 1226 * No need to disable foreign interrupts here since they're 1227 * already disabled above. 1228 */ 1229 threads[l->curr_thread].flags &= 1230 ~THREAD_FLAGS_FOREIGN_INTR_ENABLE; 1231 } 1232 } 1233 1234 void thread_restore_foreign_intr(void) 1235 { 1236 /* thread_get_core_local() requires foreign interrupts to be disabled */ 1237 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 1238 struct thread_core_local *l; 1239 1240 l = thread_get_core_local(); 1241 1242 assert(l->curr_thread != -1); 1243 1244 if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE) 1245 thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 1246 } 1247 1248 #ifdef CFG_WITH_VFP 1249 uint32_t thread_kernel_enable_vfp(void) 1250 { 1251 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 1252 struct thread_ctx *thr = threads + thread_get_id(); 1253 struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp; 1254 1255 assert(!vfp_is_enabled()); 1256 1257 if (!thr->vfp_state.ns_saved) { 1258 vfp_lazy_save_state_final(&thr->vfp_state.ns, 1259 true /*force_save*/); 1260 thr->vfp_state.ns_saved = true; 1261 } else if (thr->vfp_state.sec_lazy_saved && 1262 !thr->vfp_state.sec_saved) { 1263 /* 1264 * This happens when we're handling an abort while the 1265 * thread was using the VFP state. 1266 */ 1267 vfp_lazy_save_state_final(&thr->vfp_state.sec, 1268 false /*!force_save*/); 1269 thr->vfp_state.sec_saved = true; 1270 } else if (tuv && tuv->lazy_saved && !tuv->saved) { 1271 /* 1272 * This can happen either during syscall or abort 1273 * processing (while processing a syscall). 1274 */ 1275 vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/); 1276 tuv->saved = true; 1277 } 1278 1279 vfp_enable(); 1280 return exceptions; 1281 } 1282 1283 void thread_kernel_disable_vfp(uint32_t state) 1284 { 1285 uint32_t exceptions; 1286 1287 assert(vfp_is_enabled()); 1288 1289 vfp_disable(); 1290 exceptions = thread_get_exceptions(); 1291 assert(exceptions & THREAD_EXCP_FOREIGN_INTR); 1292 exceptions &= ~THREAD_EXCP_FOREIGN_INTR; 1293 exceptions |= state & THREAD_EXCP_FOREIGN_INTR; 1294 thread_set_exceptions(exceptions); 1295 } 1296 1297 void thread_kernel_save_vfp(void) 1298 { 1299 struct thread_ctx *thr = threads + thread_get_id(); 1300 1301 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 1302 if (vfp_is_enabled()) { 1303 vfp_lazy_save_state_init(&thr->vfp_state.sec); 1304 thr->vfp_state.sec_lazy_saved = true; 1305 } 1306 } 1307 1308 void thread_kernel_restore_vfp(void) 1309 { 1310 struct thread_ctx *thr = threads + thread_get_id(); 1311 1312 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 1313 assert(!vfp_is_enabled()); 1314 if (thr->vfp_state.sec_lazy_saved) { 1315 vfp_lazy_restore_state(&thr->vfp_state.sec, 1316 thr->vfp_state.sec_saved); 1317 thr->vfp_state.sec_saved = false; 1318 thr->vfp_state.sec_lazy_saved = false; 1319 } 1320 } 1321 1322 void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp) 1323 { 1324 struct thread_ctx *thr = threads + thread_get_id(); 1325 struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp; 1326 1327 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 1328 assert(!vfp_is_enabled()); 1329 1330 if (!thr->vfp_state.ns_saved) { 1331 vfp_lazy_save_state_final(&thr->vfp_state.ns, 1332 true /*force_save*/); 1333 thr->vfp_state.ns_saved = true; 1334 } else if (tuv && uvfp != tuv) { 1335 if (tuv->lazy_saved && !tuv->saved) { 1336 vfp_lazy_save_state_final(&tuv->vfp, 1337 false /*!force_save*/); 1338 tuv->saved = true; 1339 } 1340 } 1341 1342 if (uvfp->lazy_saved) 1343 vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved); 1344 uvfp->lazy_saved = false; 1345 uvfp->saved = false; 1346 1347 thr->vfp_state.uvfp = uvfp; 1348 vfp_enable(); 1349 } 1350 1351 void thread_user_save_vfp(void) 1352 { 1353 struct thread_ctx *thr = threads + thread_get_id(); 1354 struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp; 1355 1356 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 1357 if (!vfp_is_enabled()) 1358 return; 1359 1360 assert(tuv && !tuv->lazy_saved && !tuv->saved); 1361 vfp_lazy_save_state_init(&tuv->vfp); 1362 tuv->lazy_saved = true; 1363 } 1364 1365 void thread_user_clear_vfp(struct user_mode_ctx *uctx) 1366 { 1367 struct thread_user_vfp_state *uvfp = &uctx->vfp; 1368 struct thread_ctx *thr = threads + thread_get_id(); 1369 1370 if (uvfp == thr->vfp_state.uvfp) 1371 thr->vfp_state.uvfp = NULL; 1372 uvfp->lazy_saved = false; 1373 uvfp->saved = false; 1374 } 1375 #endif /*CFG_WITH_VFP*/ 1376 1377 #ifdef ARM32 1378 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr) 1379 { 1380 uint32_t s; 1381 1382 if (!is_32bit) 1383 return false; 1384 1385 s = read_cpsr(); 1386 s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2); 1387 s |= CPSR_MODE_USR; 1388 if (entry_func & 1) 1389 s |= CPSR_T; 1390 *spsr = s; 1391 return true; 1392 } 1393 #endif 1394 1395 #ifdef ARM64 1396 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr) 1397 { 1398 uint32_t s; 1399 1400 if (is_32bit) { 1401 s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT); 1402 s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT; 1403 s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT; 1404 } else { 1405 s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT); 1406 } 1407 1408 *spsr = s; 1409 return true; 1410 } 1411 #endif 1412 1413 static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0, 1414 unsigned long a1, unsigned long a2, unsigned long a3, 1415 unsigned long user_sp, unsigned long entry_func, 1416 uint32_t spsr) 1417 { 1418 /* 1419 * First clear all registers to avoid leaking information from 1420 * other TAs or even the Core itself. 1421 */ 1422 *regs = (struct thread_ctx_regs){ }; 1423 #ifdef ARM32 1424 regs->r0 = a0; 1425 regs->r1 = a1; 1426 regs->r2 = a2; 1427 regs->r3 = a3; 1428 regs->usr_sp = user_sp; 1429 regs->pc = entry_func; 1430 regs->cpsr = spsr; 1431 #endif 1432 #ifdef ARM64 1433 regs->x[0] = a0; 1434 regs->x[1] = a1; 1435 regs->x[2] = a2; 1436 regs->x[3] = a3; 1437 regs->sp = user_sp; 1438 regs->pc = entry_func; 1439 regs->cpsr = spsr; 1440 regs->x[13] = user_sp; /* Used when running TA in Aarch32 */ 1441 regs->sp = user_sp; /* Used when running TA in Aarch64 */ 1442 /* Set frame pointer (user stack can't be unwound past this point) */ 1443 regs->x[29] = 0; 1444 #endif 1445 } 1446 1447 uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1, 1448 unsigned long a2, unsigned long a3, unsigned long user_sp, 1449 unsigned long entry_func, bool is_32bit, 1450 uint32_t *exit_status0, uint32_t *exit_status1) 1451 { 1452 uint32_t spsr = 0; 1453 uint32_t exceptions = 0; 1454 uint32_t rc = 0; 1455 struct thread_ctx_regs *regs = NULL; 1456 1457 tee_ta_update_session_utime_resume(); 1458 1459 /* Derive SPSR from current CPSR/PSTATE readout. */ 1460 if (!get_spsr(is_32bit, entry_func, &spsr)) { 1461 *exit_status0 = 1; /* panic */ 1462 *exit_status1 = 0xbadbadba; 1463 return 0; 1464 } 1465 1466 exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 1467 /* 1468 * We're using the per thread location of saved context registers 1469 * for temporary storage. Now that exceptions are masked they will 1470 * not be used for any thing else until they are eventually 1471 * unmasked when user mode has been entered. 1472 */ 1473 regs = thread_get_ctx_regs(); 1474 set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr); 1475 rc = __thread_enter_user_mode(regs, exit_status0, exit_status1); 1476 thread_unmask_exceptions(exceptions); 1477 return rc; 1478 } 1479 1480 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0 1481 void thread_get_user_kcode(struct mobj **mobj, size_t *offset, 1482 vaddr_t *va, size_t *sz) 1483 { 1484 core_mmu_get_user_va_range(va, NULL); 1485 *mobj = mobj_tee_ram; 1486 *offset = thread_user_kcode_va - VCORE_START_VA; 1487 *sz = thread_user_kcode_size; 1488 } 1489 #endif 1490 1491 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \ 1492 defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64) 1493 void thread_get_user_kdata(struct mobj **mobj, size_t *offset, 1494 vaddr_t *va, size_t *sz) 1495 { 1496 vaddr_t v; 1497 1498 core_mmu_get_user_va_range(&v, NULL); 1499 *va = v + thread_user_kcode_size; 1500 *mobj = mobj_tee_ram; 1501 *offset = (vaddr_t)thread_user_kdata_page - VCORE_START_VA; 1502 *sz = sizeof(thread_user_kdata_page); 1503 } 1504 #endif 1505 1506 static void setup_unwind_user_mode(struct thread_svc_regs *regs) 1507 { 1508 #ifdef ARM32 1509 regs->lr = (uintptr_t)thread_unwind_user_mode; 1510 regs->spsr = read_cpsr(); 1511 #endif 1512 #ifdef ARM64 1513 regs->elr = (uintptr_t)thread_unwind_user_mode; 1514 regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0); 1515 regs->spsr |= read_daif(); 1516 /* 1517 * Regs is the value of stack pointer before calling the SVC 1518 * handler. By the addition matches for the reserved space at the 1519 * beginning of el0_sync_svc(). This prepares the stack when 1520 * returning to thread_unwind_user_mode instead of a normal 1521 * exception return. 1522 */ 1523 regs->sp_el0 = (uint64_t)(regs + 1); 1524 #endif 1525 } 1526 1527 static void gprof_set_status(struct ts_session *s __maybe_unused, 1528 enum ts_gprof_status status __maybe_unused) 1529 { 1530 #ifdef CFG_TA_GPROF_SUPPORT 1531 if (s->ctx->ops->gprof_set_status) 1532 s->ctx->ops->gprof_set_status(status); 1533 #endif 1534 } 1535 1536 /* 1537 * Note: this function is weak just to make it possible to exclude it from 1538 * the unpaged area. 1539 */ 1540 void __weak thread_svc_handler(struct thread_svc_regs *regs) 1541 { 1542 struct ts_session *sess = NULL; 1543 uint32_t state = 0; 1544 1545 /* Enable native interrupts */ 1546 state = thread_get_exceptions(); 1547 thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR); 1548 1549 thread_user_save_vfp(); 1550 1551 sess = ts_get_current_session(); 1552 /* 1553 * User mode service has just entered kernel mode, suspend gprof 1554 * collection until we're about to switch back again. 1555 */ 1556 gprof_set_status(sess, TS_GPROF_SUSPEND); 1557 1558 /* Restore foreign interrupts which are disabled on exception entry */ 1559 thread_restore_foreign_intr(); 1560 1561 assert(sess && sess->handle_svc); 1562 if (sess->handle_svc(regs)) { 1563 /* We're about to switch back to user mode */ 1564 gprof_set_status(sess, TS_GPROF_RESUME); 1565 } else { 1566 /* We're returning from __thread_enter_user_mode() */ 1567 setup_unwind_user_mode(regs); 1568 } 1569 } 1570 1571 static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size) 1572 { 1573 switch (shm_type) { 1574 case THREAD_SHM_TYPE_APPLICATION: 1575 return thread_rpc_alloc_payload(size); 1576 case THREAD_SHM_TYPE_KERNEL_PRIVATE: 1577 return thread_rpc_alloc_kernel_payload(size); 1578 case THREAD_SHM_TYPE_GLOBAL: 1579 return thread_rpc_alloc_global_payload(size); 1580 default: 1581 return NULL; 1582 } 1583 } 1584 1585 static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce) 1586 { 1587 if (ce->mobj) { 1588 switch (ce->type) { 1589 case THREAD_SHM_TYPE_APPLICATION: 1590 thread_rpc_free_payload(ce->mobj); 1591 break; 1592 case THREAD_SHM_TYPE_KERNEL_PRIVATE: 1593 thread_rpc_free_kernel_payload(ce->mobj); 1594 break; 1595 case THREAD_SHM_TYPE_GLOBAL: 1596 thread_rpc_free_global_payload(ce->mobj); 1597 break; 1598 default: 1599 assert(0); /* "can't happen" */ 1600 break; 1601 } 1602 } 1603 ce->mobj = NULL; 1604 ce->size = 0; 1605 } 1606 1607 static struct thread_shm_cache_entry * 1608 get_shm_cache_entry(enum thread_shm_cache_user user) 1609 { 1610 struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache; 1611 struct thread_shm_cache_entry *ce = NULL; 1612 1613 SLIST_FOREACH(ce, cache, link) 1614 if (ce->user == user) 1615 return ce; 1616 1617 ce = calloc(1, sizeof(*ce)); 1618 if (ce) { 1619 ce->user = user; 1620 SLIST_INSERT_HEAD(cache, ce, link); 1621 } 1622 1623 return ce; 1624 } 1625 1626 void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user, 1627 enum thread_shm_type shm_type, 1628 size_t size, struct mobj **mobj) 1629 { 1630 struct thread_shm_cache_entry *ce = NULL; 1631 size_t sz = size; 1632 paddr_t p = 0; 1633 void *va = NULL; 1634 1635 if (!size) 1636 return NULL; 1637 1638 ce = get_shm_cache_entry(user); 1639 if (!ce) 1640 return NULL; 1641 1642 /* 1643 * Always allocate in page chunks as normal world allocates payload 1644 * memory as complete pages. 1645 */ 1646 sz = ROUNDUP(size, SMALL_PAGE_SIZE); 1647 1648 if (ce->type != shm_type || sz > ce->size) { 1649 clear_shm_cache_entry(ce); 1650 1651 ce->mobj = alloc_shm(shm_type, sz); 1652 if (!ce->mobj) 1653 return NULL; 1654 1655 if (mobj_get_pa(ce->mobj, 0, 0, &p)) 1656 goto err; 1657 1658 if (!ALIGNMENT_IS_OK(p, uint64_t)) 1659 goto err; 1660 1661 va = mobj_get_va(ce->mobj, 0); 1662 if (!va) 1663 goto err; 1664 1665 ce->size = sz; 1666 ce->type = shm_type; 1667 } else { 1668 va = mobj_get_va(ce->mobj, 0); 1669 if (!va) 1670 goto err; 1671 } 1672 *mobj = ce->mobj; 1673 1674 return va; 1675 err: 1676 clear_shm_cache_entry(ce); 1677 return NULL; 1678 } 1679 1680 void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache) 1681 { 1682 while (true) { 1683 struct thread_shm_cache_entry *ce = SLIST_FIRST(cache); 1684 1685 if (!ce) 1686 break; 1687 SLIST_REMOVE_HEAD(cache, link); 1688 clear_shm_cache_entry(ce); 1689 free(ce); 1690 } 1691 } 1692 1693 #ifdef CFG_WITH_ARM_TRUSTED_FW 1694 /* 1695 * These five functions are __weak to allow platforms to override them if 1696 * needed. 1697 */ 1698 unsigned long __weak thread_cpu_off_handler(unsigned long a0 __unused, 1699 unsigned long a1 __unused) 1700 { 1701 return 0; 1702 } 1703 DECLARE_KEEP_PAGER(thread_cpu_off_handler); 1704 1705 unsigned long __weak thread_cpu_suspend_handler(unsigned long a0 __unused, 1706 unsigned long a1 __unused) 1707 { 1708 return 0; 1709 } 1710 DECLARE_KEEP_PAGER(thread_cpu_suspend_handler); 1711 1712 unsigned long __weak thread_cpu_resume_handler(unsigned long a0 __unused, 1713 unsigned long a1 __unused) 1714 { 1715 return 0; 1716 } 1717 DECLARE_KEEP_PAGER(thread_cpu_resume_handler); 1718 1719 unsigned long __weak thread_system_off_handler(unsigned long a0 __unused, 1720 unsigned long a1 __unused) 1721 { 1722 return 0; 1723 } 1724 DECLARE_KEEP_PAGER(thread_system_off_handler); 1725 1726 unsigned long __weak thread_system_reset_handler(unsigned long a0 __unused, 1727 unsigned long a1 __unused) 1728 { 1729 return 0; 1730 } 1731 DECLARE_KEEP_PAGER(thread_system_reset_handler); 1732 #endif /*CFG_WITH_ARM_TRUSTED_FW*/ 1733