1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016, Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 */ 6 7 #include <platform_config.h> 8 9 #include <arm.h> 10 #include <assert.h> 11 #include <io.h> 12 #include <keep.h> 13 #include <kernel/asan.h> 14 #include <kernel/linker.h> 15 #include <kernel/lockdep.h> 16 #include <kernel/misc.h> 17 #include <kernel/panic.h> 18 #include <kernel/spinlock.h> 19 #include <kernel/tee_ta_manager.h> 20 #include <kernel/thread_defs.h> 21 #include <kernel/thread.h> 22 #include <kernel/virtualization.h> 23 #include <mm/core_memprot.h> 24 #include <mm/mobj.h> 25 #include <mm/tee_mm.h> 26 #include <mm/tee_mmu.h> 27 #include <mm/tee_pager.h> 28 #include <smccc.h> 29 #include <sm/sm.h> 30 #include <trace.h> 31 #include <util.h> 32 33 #include "thread_private.h" 34 35 #ifdef CFG_WITH_ARM_TRUSTED_FW 36 #define STACK_TMP_OFFS 0 37 #else 38 #define STACK_TMP_OFFS SM_STACK_TMP_RESERVE_SIZE 39 #endif 40 41 42 #ifdef ARM32 43 #ifdef CFG_CORE_SANITIZE_KADDRESS 44 #define STACK_TMP_SIZE (3072 + STACK_TMP_OFFS) 45 #else 46 #define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS) 47 #endif 48 #define STACK_THREAD_SIZE 8192 49 50 #if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(__clang__) 51 #define STACK_ABT_SIZE 3072 52 #else 53 #define STACK_ABT_SIZE 2048 54 #endif 55 56 #endif /*ARM32*/ 57 58 #ifdef ARM64 59 #if defined(__clang__) && !defined(CFG_CC_OPTIMIZE_FOR_SIZE) 60 #define STACK_TMP_SIZE (4096 + STACK_TMP_OFFS) 61 #else 62 #define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS) 63 #endif 64 #define STACK_THREAD_SIZE 8192 65 66 #if TRACE_LEVEL > 0 67 #define STACK_ABT_SIZE 3072 68 #else 69 #define STACK_ABT_SIZE 1024 70 #endif 71 #endif /*ARM64*/ 72 73 struct thread_ctx threads[CFG_NUM_THREADS]; 74 75 struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss; 76 77 #ifdef CFG_WITH_STACK_CANARIES 78 #ifdef ARM32 79 #define STACK_CANARY_SIZE (4 * sizeof(uint32_t)) 80 #endif 81 #ifdef ARM64 82 #define STACK_CANARY_SIZE (8 * sizeof(uint32_t)) 83 #endif 84 #define START_CANARY_VALUE 0xdededede 85 #define END_CANARY_VALUE 0xabababab 86 #define GET_START_CANARY(name, stack_num) name[stack_num][0] 87 #define GET_END_CANARY(name, stack_num) \ 88 name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1] 89 #else 90 #define STACK_CANARY_SIZE 0 91 #endif 92 93 #define DECLARE_STACK(name, num_stacks, stack_size, linkage) \ 94 linkage uint32_t name[num_stacks] \ 95 [ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \ 96 sizeof(uint32_t)] \ 97 __attribute__((section(".nozi_stack." # name), \ 98 aligned(STACK_ALIGNMENT))) 99 100 #define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2) 101 102 #define GET_STACK(stack) \ 103 ((vaddr_t)(stack) + STACK_SIZE(stack)) 104 105 DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, static); 106 DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static); 107 #ifndef CFG_WITH_PAGER 108 DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static); 109 #endif 110 111 const void *stack_tmp_export __section(".identity_map.stack_tmp_export") = 112 (uint8_t *)stack_tmp + sizeof(stack_tmp[0]) - 113 (STACK_TMP_OFFS + STACK_CANARY_SIZE / 2); 114 const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") = 115 sizeof(stack_tmp[0]); 116 117 /* 118 * These stack setup info are required by secondary boot cores before they 119 * each locally enable the pager (the mmu). Hence kept in pager sections. 
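*
* stack_tmp_export points at the usable end of the first temporary stack
* (the end canary and SM reserve already subtracted) and stack_tmp_stride
* is the size of one stack_tmp[] entry, so early boot code can derive a
* per-core temporary stack pointer roughly as (illustration only, the real
* computation is done in the boot assembly):
*
*   sp = stack_tmp_export + core_pos * stack_tmp_stride;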
120 */ 121 DECLARE_KEEP_PAGER(stack_tmp_export); 122 DECLARE_KEEP_PAGER(stack_tmp_stride); 123 124 thread_pm_handler_t thread_cpu_on_handler_ptr __nex_bss; 125 thread_pm_handler_t thread_cpu_off_handler_ptr __nex_bss; 126 thread_pm_handler_t thread_cpu_suspend_handler_ptr __nex_bss; 127 thread_pm_handler_t thread_cpu_resume_handler_ptr __nex_bss; 128 thread_pm_handler_t thread_system_off_handler_ptr __nex_bss; 129 thread_pm_handler_t thread_system_reset_handler_ptr __nex_bss; 130 131 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0 132 static vaddr_t thread_user_kcode_va __nex_bss; 133 long thread_user_kcode_offset __nex_bss; 134 static size_t thread_user_kcode_size __nex_bss; 135 #endif 136 137 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \ 138 defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64) 139 long thread_user_kdata_sp_offset __nex_bss; 140 static uint8_t thread_user_kdata_page[ 141 ROUNDUP(sizeof(thread_core_local), SMALL_PAGE_SIZE)] 142 __aligned(SMALL_PAGE_SIZE) 143 #ifndef CFG_VIRTUALIZATION 144 __section(".nozi.kdata_page"); 145 #else 146 __section(".nex_nozi.kdata_page"); 147 #endif 148 #endif 149 150 static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK; 151 152 static void init_canaries(void) 153 { 154 #ifdef CFG_WITH_STACK_CANARIES 155 size_t n; 156 #define INIT_CANARY(name) \ 157 for (n = 0; n < ARRAY_SIZE(name); n++) { \ 158 uint32_t *start_canary = &GET_START_CANARY(name, n); \ 159 uint32_t *end_canary = &GET_END_CANARY(name, n); \ 160 \ 161 *start_canary = START_CANARY_VALUE; \ 162 *end_canary = END_CANARY_VALUE; \ 163 DMSG("#Stack canaries for %s[%zu] with top at %p", \ 164 #name, n, (void *)(end_canary - 1)); \ 165 DMSG("watch *%p", (void *)end_canary); \ 166 } 167 168 INIT_CANARY(stack_tmp); 169 INIT_CANARY(stack_abt); 170 #if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION) 171 INIT_CANARY(stack_thread); 172 #endif 173 #endif/*CFG_WITH_STACK_CANARIES*/ 174 } 175 176 #define CANARY_DIED(stack, loc, n) \ 177 do { \ 178 EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \ 179 panic(); \ 180 } while (0) 181 182 void thread_check_canaries(void) 183 { 184 #ifdef CFG_WITH_STACK_CANARIES 185 size_t n; 186 187 for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) { 188 if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE) 189 CANARY_DIED(stack_tmp, start, n); 190 if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE) 191 CANARY_DIED(stack_tmp, end, n); 192 } 193 194 for (n = 0; n < ARRAY_SIZE(stack_abt); n++) { 195 if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE) 196 CANARY_DIED(stack_abt, start, n); 197 if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE) 198 CANARY_DIED(stack_abt, end, n); 199 200 } 201 #if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION) 202 for (n = 0; n < ARRAY_SIZE(stack_thread); n++) { 203 if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE) 204 CANARY_DIED(stack_thread, start, n); 205 if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE) 206 CANARY_DIED(stack_thread, end, n); 207 } 208 #endif 209 #endif/*CFG_WITH_STACK_CANARIES*/ 210 } 211 212 void thread_lock_global(void) 213 { 214 cpu_spin_lock(&thread_global_lock); 215 } 216 217 void thread_unlock_global(void) 218 { 219 cpu_spin_unlock(&thread_global_lock); 220 } 221 222 #ifdef ARM32 223 uint32_t thread_get_exceptions(void) 224 { 225 uint32_t cpsr = read_cpsr(); 226 227 return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL; 228 } 229 230 void thread_set_exceptions(uint32_t exceptions) 231 { 232 uint32_t cpsr = read_cpsr(); 233 234 /* Foreign interrupts must 
not be unmasked while holding a spinlock */ 235 if (!(exceptions & THREAD_EXCP_FOREIGN_INTR)) 236 assert_have_no_spinlock(); 237 238 cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT); 239 cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT); 240 write_cpsr(cpsr); 241 } 242 #endif /*ARM32*/ 243 244 #ifdef ARM64 245 uint32_t thread_get_exceptions(void) 246 { 247 uint32_t daif = read_daif(); 248 249 return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL; 250 } 251 252 void thread_set_exceptions(uint32_t exceptions) 253 { 254 uint32_t daif = read_daif(); 255 256 /* Foreign interrupts must not be unmasked while holding a spinlock */ 257 if (!(exceptions & THREAD_EXCP_FOREIGN_INTR)) 258 assert_have_no_spinlock(); 259 260 daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT); 261 daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT); 262 write_daif(daif); 263 } 264 #endif /*ARM64*/ 265 266 uint32_t thread_mask_exceptions(uint32_t exceptions) 267 { 268 uint32_t state = thread_get_exceptions(); 269 270 thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL)); 271 return state; 272 } 273 274 void thread_unmask_exceptions(uint32_t state) 275 { 276 thread_set_exceptions(state & THREAD_EXCP_ALL); 277 } 278 279 280 static struct thread_core_local *get_core_local(unsigned int pos) 281 { 282 /* 283 * Foreign interrupts must be disabled before playing with core_local 284 * since we otherwise may be rescheduled to a different core in the 285 * middle of this function. 286 */ 287 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 288 289 assert(pos < CFG_TEE_CORE_NB_CORE); 290 return &thread_core_local[pos]; 291 } 292 293 struct thread_core_local *thread_get_core_local(void) 294 { 295 unsigned int pos = get_core_pos(); 296 297 return get_core_local(pos); 298 } 299 300 void thread_core_local_set_tmp_stack_flag(void) 301 { 302 thread_get_core_local()->flags |= THREAD_CLF_TMP; 303 } 304 305 static void thread_lazy_save_ns_vfp(void) 306 { 307 #ifdef CFG_WITH_VFP 308 struct thread_ctx *thr = threads + thread_get_id(); 309 310 thr->vfp_state.ns_saved = false; 311 vfp_lazy_save_state_init(&thr->vfp_state.ns); 312 #endif /*CFG_WITH_VFP*/ 313 } 314 315 static void thread_lazy_restore_ns_vfp(void) 316 { 317 #ifdef CFG_WITH_VFP 318 struct thread_ctx *thr = threads + thread_get_id(); 319 struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp; 320 321 assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved); 322 323 if (tuv && tuv->lazy_saved && !tuv->saved) { 324 vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/); 325 tuv->saved = true; 326 } 327 328 vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved); 329 thr->vfp_state.ns_saved = false; 330 #endif /*CFG_WITH_VFP*/ 331 } 332 333 #ifdef ARM32 334 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1, 335 uint32_t a2, uint32_t a3) 336 { 337 thread->regs.pc = (uint32_t)thread_std_smc_entry; 338 339 /* 340 * Stdcalls starts in SVC mode with masked foreign interrupts, masked 341 * Asynchronous abort and unmasked native interrupts. 342 */ 343 thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E; 344 thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A | 345 (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT); 346 /* Enable thumb mode if it's a thumb instruction */ 347 if (thread->regs.pc & 1) 348 thread->regs.cpsr |= CPSR_T; 349 /* Reinitialize stack pointer */ 350 thread->regs.svc_sp = thread->stack_va_end; 351 352 /* 353 * Copy arguments into context. This will make the 354 * arguments appear in r0-r7 when thread is started. 
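* Only a0-a3 carry SMC arguments; r4-r7 are cleared below so the new
* thread does not start with stale register contents.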
355 */ 356 thread->regs.r0 = a0; 357 thread->regs.r1 = a1; 358 thread->regs.r2 = a2; 359 thread->regs.r3 = a3; 360 thread->regs.r4 = 0; 361 thread->regs.r5 = 0; 362 thread->regs.r6 = 0; 363 thread->regs.r7 = 0; 364 } 365 #endif /*ARM32*/ 366 367 #ifdef ARM64 368 static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1, 369 uint32_t a2, uint32_t a3) 370 { 371 thread->regs.pc = (uint64_t)thread_std_smc_entry; 372 373 /* 374 * Stdcalls starts in SVC mode with masked foreign interrupts, masked 375 * Asynchronous abort and unmasked native interrupts. 376 */ 377 thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 378 THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT); 379 /* Reinitialize stack pointer */ 380 thread->regs.sp = thread->stack_va_end; 381 382 /* 383 * Copy arguments into context. This will make the 384 * arguments appear in x0-x7 when thread is started. 385 */ 386 thread->regs.x[0] = a0; 387 thread->regs.x[1] = a1; 388 thread->regs.x[2] = a2; 389 thread->regs.x[3] = a3; 390 thread->regs.x[4] = 0; 391 thread->regs.x[5] = 0; 392 thread->regs.x[6] = 0; 393 thread->regs.x[7] = 0; 394 395 /* Set up frame pointer as per the Aarch64 AAPCS */ 396 thread->regs.x[29] = 0; 397 } 398 #endif /*ARM64*/ 399 400 void thread_init_boot_thread(void) 401 { 402 struct thread_core_local *l = thread_get_core_local(); 403 404 thread_init_threads(); 405 406 l->curr_thread = 0; 407 threads[0].state = THREAD_STATE_ACTIVE; 408 } 409 410 void thread_clr_boot_thread(void) 411 { 412 struct thread_core_local *l = thread_get_core_local(); 413 414 assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS); 415 assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE); 416 threads[l->curr_thread].state = THREAD_STATE_FREE; 417 l->curr_thread = -1; 418 l->flags &= ~THREAD_CLF_TMP; 419 } 420 421 void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3) 422 { 423 size_t n; 424 struct thread_core_local *l = thread_get_core_local(); 425 bool found_thread = false; 426 427 assert(l->curr_thread == -1); 428 429 thread_lock_global(); 430 431 for (n = 0; n < CFG_NUM_THREADS; n++) { 432 if (threads[n].state == THREAD_STATE_FREE) { 433 threads[n].state = THREAD_STATE_ACTIVE; 434 found_thread = true; 435 break; 436 } 437 } 438 439 thread_unlock_global(); 440 441 if (!found_thread) 442 return; 443 444 l->curr_thread = n; 445 446 threads[n].flags = 0; 447 init_regs(threads + n, a0, a1, a2, a3); 448 449 thread_lazy_save_ns_vfp(); 450 451 l->flags &= ~THREAD_CLF_TMP; 452 thread_resume(&threads[n].regs); 453 /*NOTREACHED*/ 454 panic(); 455 } 456 457 #ifdef ARM32 458 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0, 459 uint32_t a1, uint32_t a2, uint32_t a3) 460 { 461 /* 462 * Update returned values from RPC, values will appear in 463 * r0-r3 when thread is resumed. 464 */ 465 regs->r0 = a0; 466 regs->r1 = a1; 467 regs->r2 = a2; 468 regs->r3 = a3; 469 } 470 #endif /*ARM32*/ 471 472 #ifdef ARM64 473 static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0, 474 uint32_t a1, uint32_t a2, uint32_t a3) 475 { 476 /* 477 * Update returned values from RPC, values will appear in 478 * x0-x3 when thread is resumed. 
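* Same contract as the ARM32 variant above: this is only used when the
* suspended thread has THREAD_FLAGS_COPY_ARGS_ON_RETURN set, see
* thread_resume_from_rpc() below.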
479 */ 480 regs->x[0] = a0; 481 regs->x[1] = a1; 482 regs->x[2] = a2; 483 regs->x[3] = a3; 484 } 485 #endif /*ARM64*/ 486 487 #ifdef ARM32 488 static bool is_from_user(uint32_t cpsr) 489 { 490 return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR; 491 } 492 #endif 493 494 #ifdef ARM64 495 static bool is_from_user(uint32_t cpsr) 496 { 497 if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT)) 498 return true; 499 if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) == 500 SPSR_64_MODE_EL0) 501 return true; 502 return false; 503 } 504 #endif 505 506 #ifdef CFG_SYSCALL_FTRACE 507 static void __noprof ftrace_suspend(void) 508 { 509 struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack); 510 511 if (!s) 512 return; 513 514 if (s->fbuf) 515 s->fbuf->syscall_trace_suspended = true; 516 } 517 518 static void __noprof ftrace_resume(void) 519 { 520 struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack); 521 522 if (!s) 523 return; 524 525 if (s->fbuf) 526 s->fbuf->syscall_trace_suspended = false; 527 } 528 #else 529 static void __noprof ftrace_suspend(void) 530 { 531 } 532 533 static void __noprof ftrace_resume(void) 534 { 535 } 536 #endif 537 538 static bool is_user_mode(struct thread_ctx_regs *regs) 539 { 540 return is_from_user((uint32_t)regs->cpsr); 541 } 542 543 void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1, 544 uint32_t a2, uint32_t a3) 545 { 546 size_t n = thread_id; 547 struct thread_core_local *l = thread_get_core_local(); 548 bool found_thread = false; 549 550 assert(l->curr_thread == -1); 551 552 thread_lock_global(); 553 554 if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) { 555 threads[n].state = THREAD_STATE_ACTIVE; 556 found_thread = true; 557 } 558 559 thread_unlock_global(); 560 561 if (!found_thread) 562 return; 563 564 l->curr_thread = n; 565 566 if (threads[n].have_user_map) { 567 core_mmu_set_user_map(&threads[n].user_map); 568 if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR) 569 tee_ta_ftrace_update_times_resume(); 570 } 571 572 if (is_user_mode(&threads[n].regs)) 573 tee_ta_update_session_utime_resume(); 574 575 /* 576 * Return from RPC to request service of a foreign interrupt must not 577 * get parameters from non-secure world. 
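* THREAD_FLAGS_COPY_ARGS_ON_RETURN is only set when the thread suspended
* itself to do an RPC, not when it was preempted by a foreign interrupt,
* so a0-a3 are copied into the context only in the former case.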
578 */ 579 if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) { 580 copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3); 581 threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN; 582 } 583 584 thread_lazy_save_ns_vfp(); 585 586 if (threads[n].have_user_map) 587 ftrace_resume(); 588 589 l->flags &= ~THREAD_CLF_TMP; 590 thread_resume(&threads[n].regs); 591 /*NOTREACHED*/ 592 panic(); 593 } 594 595 void *thread_get_tmp_sp(void) 596 { 597 struct thread_core_local *l = thread_get_core_local(); 598 599 /* 600 * Called from assembly when switching to the temporary stack, so flags 601 * need updating 602 */ 603 l->flags |= THREAD_CLF_TMP; 604 605 return (void *)l->tmp_stack_va_end; 606 } 607 608 #ifdef ARM64 609 vaddr_t thread_get_saved_thread_sp(void) 610 { 611 struct thread_core_local *l = thread_get_core_local(); 612 int ct = l->curr_thread; 613 614 assert(ct != -1); 615 return threads[ct].kern_sp; 616 } 617 #endif /*ARM64*/ 618 619 vaddr_t thread_stack_start(void) 620 { 621 struct thread_ctx *thr; 622 int ct = thread_get_id_may_fail(); 623 624 if (ct == -1) 625 return 0; 626 627 thr = threads + ct; 628 return thr->stack_va_end - STACK_THREAD_SIZE; 629 } 630 631 size_t thread_stack_size(void) 632 { 633 return STACK_THREAD_SIZE; 634 } 635 636 void get_stack_limits(vaddr_t *start, vaddr_t *end) 637 { 638 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 639 unsigned int pos = get_core_pos(); 640 struct thread_core_local *l = get_core_local(pos); 641 struct thread_ctx *thr = NULL; 642 int ct = -1; 643 644 if (l->flags & THREAD_CLF_TMP) { 645 /* We're using the temporary stack for this core */ 646 *start = (vaddr_t)stack_tmp[pos]; 647 *end = *start + STACK_TMP_SIZE; 648 } else if (l->flags & THREAD_CLF_ABORT) { 649 /* We're using the abort stack for this core */ 650 *start = (vaddr_t)stack_abt[pos]; 651 *end = *start + STACK_ABT_SIZE; 652 } else if (!l->flags) { 653 /* We're using a thread stack */ 654 ct = l->curr_thread; 655 assert(ct >= 0 && ct < CFG_NUM_THREADS); 656 thr = threads + ct; 657 *end = thr->stack_va_end; 658 *start = *end - STACK_THREAD_SIZE; 659 } 660 661 thread_unmask_exceptions(exceptions); 662 } 663 664 bool thread_is_from_abort_mode(void) 665 { 666 struct thread_core_local *l = thread_get_core_local(); 667 668 return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT; 669 } 670 671 #ifdef ARM32 672 bool thread_is_in_normal_mode(void) 673 { 674 return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC; 675 } 676 #endif 677 678 #ifdef ARM64 679 bool thread_is_in_normal_mode(void) 680 { 681 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 682 struct thread_core_local *l = thread_get_core_local(); 683 bool ret; 684 685 /* 686 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're 687 * handling some exception. 
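* In other words, "normal mode" means a thread context is active on this
* core (curr_thread != -1) and no abort or interrupt handling is in
* progress.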
688 */ 689 ret = (l->curr_thread != -1) && !(l->flags & ~THREAD_CLF_TMP); 690 thread_unmask_exceptions(exceptions); 691 692 return ret; 693 } 694 #endif 695 696 void thread_state_free(void) 697 { 698 struct thread_core_local *l = thread_get_core_local(); 699 int ct = l->curr_thread; 700 701 assert(ct != -1); 702 703 thread_lazy_restore_ns_vfp(); 704 tee_pager_release_phys( 705 (void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE), 706 STACK_THREAD_SIZE); 707 708 thread_lock_global(); 709 710 assert(threads[ct].state == THREAD_STATE_ACTIVE); 711 threads[ct].state = THREAD_STATE_FREE; 712 threads[ct].flags = 0; 713 l->curr_thread = -1; 714 715 #ifdef CFG_VIRTUALIZATION 716 virt_unset_guest(); 717 #endif 718 thread_unlock_global(); 719 } 720 721 #ifdef CFG_WITH_PAGER 722 static void release_unused_kernel_stack(struct thread_ctx *thr, 723 uint32_t cpsr __maybe_unused) 724 { 725 #ifdef ARM64 726 /* 727 * If we're from user mode then thr->regs.sp is the saved user 728 * stack pointer and thr->kern_sp holds the last kernel stack 729 * pointer. But if we're from kernel mode then thr->kern_sp isn't 730 * up to date so we need to read from thr->regs.sp instead. 731 */ 732 vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp; 733 #else 734 vaddr_t sp = thr->regs.svc_sp; 735 #endif 736 vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE; 737 size_t len = sp - base; 738 739 tee_pager_release_phys((void *)base, len); 740 } 741 #else 742 static void release_unused_kernel_stack(struct thread_ctx *thr __unused, 743 uint32_t cpsr __unused) 744 { 745 } 746 #endif 747 748 int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc) 749 { 750 struct thread_core_local *l = thread_get_core_local(); 751 int ct = l->curr_thread; 752 753 assert(ct != -1); 754 755 if (core_mmu_user_mapping_is_active()) 756 ftrace_suspend(); 757 758 thread_check_canaries(); 759 760 release_unused_kernel_stack(threads + ct, cpsr); 761 762 if (is_from_user(cpsr)) { 763 thread_user_save_vfp(); 764 tee_ta_update_session_utime_suspend(); 765 tee_ta_gprof_sample_pc(pc); 766 } 767 thread_lazy_restore_ns_vfp(); 768 769 thread_lock_global(); 770 771 assert(threads[ct].state == THREAD_STATE_ACTIVE); 772 threads[ct].flags |= flags; 773 threads[ct].regs.cpsr = cpsr; 774 threads[ct].regs.pc = pc; 775 threads[ct].state = THREAD_STATE_SUSPENDED; 776 777 threads[ct].have_user_map = core_mmu_user_mapping_is_active(); 778 if (threads[ct].have_user_map) { 779 if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR) 780 tee_ta_ftrace_update_times_suspend(); 781 core_mmu_get_user_map(&threads[ct].user_map); 782 core_mmu_set_user_map(NULL); 783 } 784 785 l->curr_thread = -1; 786 787 #ifdef CFG_VIRTUALIZATION 788 virt_unset_guest(); 789 #endif 790 791 thread_unlock_global(); 792 793 return ct; 794 } 795 796 #ifdef ARM32 797 static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp) 798 { 799 l->tmp_stack_va_end = sp; 800 thread_set_irq_sp(sp); 801 thread_set_fiq_sp(sp); 802 } 803 804 static void set_abt_stack(struct thread_core_local *l, vaddr_t sp) 805 { 806 l->abt_stack_va_end = sp; 807 thread_set_abt_sp((vaddr_t)l); 808 thread_set_und_sp((vaddr_t)l); 809 } 810 #endif /*ARM32*/ 811 812 #ifdef ARM64 813 static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp) 814 { 815 /* 816 * We're already using the tmp stack when this function is called 817 * so there's no need to assign it to any stack pointer. However, 818 * we'll need to restore it at different times so store it here. 
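* thread_get_tmp_sp() above returns this stored value when assembly code
* later switches back to the temporary stack.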
819 */ 820 l->tmp_stack_va_end = sp; 821 } 822 823 static void set_abt_stack(struct thread_core_local *l, vaddr_t sp) 824 { 825 l->abt_stack_va_end = sp; 826 } 827 #endif /*ARM64*/ 828 829 bool thread_init_stack(uint32_t thread_id, vaddr_t sp) 830 { 831 if (thread_id >= CFG_NUM_THREADS) 832 return false; 833 threads[thread_id].stack_va_end = sp; 834 return true; 835 } 836 837 short int thread_get_id_may_fail(void) 838 { 839 /* 840 * thread_get_core_local() requires foreign interrupts to be disabled 841 */ 842 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 843 struct thread_core_local *l = thread_get_core_local(); 844 short int ct = l->curr_thread; 845 846 thread_unmask_exceptions(exceptions); 847 return ct; 848 } 849 850 short int thread_get_id(void) 851 { 852 short int ct = thread_get_id_may_fail(); 853 854 /* Thread ID has to fit in a short int */ 855 COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX); 856 assert(ct >= 0 && ct < CFG_NUM_THREADS); 857 return ct; 858 } 859 860 static void init_handlers(const struct thread_handlers *handlers) 861 { 862 thread_cpu_on_handler_ptr = handlers->cpu_on; 863 thread_cpu_off_handler_ptr = handlers->cpu_off; 864 thread_cpu_suspend_handler_ptr = handlers->cpu_suspend; 865 thread_cpu_resume_handler_ptr = handlers->cpu_resume; 866 thread_system_off_handler_ptr = handlers->system_off; 867 thread_system_reset_handler_ptr = handlers->system_reset; 868 } 869 870 #ifdef CFG_WITH_PAGER 871 static void init_thread_stacks(void) 872 { 873 size_t n = 0; 874 875 /* 876 * Allocate virtual memory for thread stacks. 877 */ 878 for (n = 0; n < CFG_NUM_THREADS; n++) { 879 tee_mm_entry_t *mm = NULL; 880 vaddr_t sp = 0; 881 size_t num_pages = 0; 882 struct fobj *fobj = NULL; 883 884 /* Find vmem for thread stack and its protection gap */ 885 mm = tee_mm_alloc(&tee_mm_vcore, 886 SMALL_PAGE_SIZE + STACK_THREAD_SIZE); 887 assert(mm); 888 889 /* Claim eventual physical page */ 890 tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm), 891 true); 892 893 num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1; 894 fobj = fobj_locked_paged_alloc(num_pages); 895 896 /* Add the area to the pager */ 897 tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE, 898 PAGER_AREA_TYPE_LOCK, fobj); 899 fobj_put(fobj); 900 901 /* init effective stack */ 902 sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm); 903 asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp); 904 if (!thread_init_stack(n, sp)) 905 panic("init stack failed"); 906 } 907 } 908 #else 909 static void init_thread_stacks(void) 910 { 911 size_t n; 912 913 /* Assign the thread stacks */ 914 for (n = 0; n < CFG_NUM_THREADS; n++) { 915 if (!thread_init_stack(n, GET_STACK(stack_thread[n]))) 916 panic("thread_init_stack failed"); 917 } 918 } 919 #endif /*CFG_WITH_PAGER*/ 920 921 static void init_user_kcode(void) 922 { 923 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0 924 vaddr_t v = (vaddr_t)thread_excp_vect; 925 vaddr_t ve = (vaddr_t)thread_excp_vect_end; 926 927 thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE); 928 ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE); 929 thread_user_kcode_size = ve - thread_user_kcode_va; 930 931 core_mmu_get_user_va_range(&v, NULL); 932 thread_user_kcode_offset = thread_user_kcode_va - v; 933 934 #if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64) 935 /* 936 * When transitioning to EL0 subtract SP with this much to point to 937 * this special kdata page instead. SP is restored by add this much 938 * while transitioning back to EL1. 
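*
* Roughly, and for illustration only (the actual adjustment is done in
* the exception vector assembly):
*
*   sp_at_el0 = sp_at_el1 - thread_user_kdata_sp_offset
*
* which makes SP point into the user-mapped alias of this kdata page
* while the rest of the core mapping is unmapped at EL0.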
939 */ 940 v += thread_user_kcode_size; 941 thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v; 942 #endif 943 #endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/ 944 } 945 946 void thread_init_threads(void) 947 { 948 size_t n = 0; 949 950 init_thread_stacks(); 951 pgt_init(); 952 953 mutex_lockdep_init(); 954 955 for (n = 0; n < CFG_NUM_THREADS; n++) { 956 TAILQ_INIT(&threads[n].tsd.sess_stack); 957 SLIST_INIT(&threads[n].tsd.pgt_cache); 958 } 959 } 960 961 void thread_clr_thread_core_local(void) 962 { 963 size_t n = 0; 964 965 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) 966 thread_core_local[n].curr_thread = -1; 967 thread_core_local[0].flags |= THREAD_CLF_TMP; 968 } 969 970 void thread_init_primary(const struct thread_handlers *handlers) 971 { 972 init_handlers(handlers); 973 974 /* Initialize canaries around the stacks */ 975 init_canaries(); 976 977 init_user_kcode(); 978 } 979 980 static void init_sec_mon(size_t pos __maybe_unused) 981 { 982 #if !defined(CFG_WITH_ARM_TRUSTED_FW) 983 /* Initialize secure monitor */ 984 sm_init(GET_STACK(stack_tmp[pos])); 985 #endif 986 } 987 988 static uint32_t __maybe_unused get_midr_implementer(uint32_t midr) 989 { 990 return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK; 991 } 992 993 static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr) 994 { 995 return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) & 996 MIDR_PRIMARY_PART_NUM_MASK; 997 } 998 999 #ifdef ARM64 1000 static bool probe_workaround_available(void) 1001 { 1002 int32_t r; 1003 1004 r = thread_smc(SMCCC_VERSION, 0, 0, 0); 1005 if (r < 0) 1006 return false; 1007 if (r < 0x10001) /* compare with version 1.1 */ 1008 return false; 1009 1010 /* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */ 1011 r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0); 1012 return r >= 0; 1013 } 1014 1015 static vaddr_t __maybe_unused select_vector(vaddr_t a) 1016 { 1017 if (probe_workaround_available()) { 1018 DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available", 1019 SMCCC_ARCH_WORKAROUND_1); 1020 DMSG("SMC Workaround for CVE-2017-5715 used"); 1021 return a; 1022 } 1023 1024 DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable", 1025 SMCCC_ARCH_WORKAROUND_1); 1026 DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)"); 1027 return (vaddr_t)thread_excp_vect; 1028 } 1029 #else 1030 static vaddr_t __maybe_unused select_vector(vaddr_t a) 1031 { 1032 return a; 1033 } 1034 #endif 1035 1036 static vaddr_t get_excp_vect(void) 1037 { 1038 #ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC 1039 uint32_t midr = read_midr(); 1040 1041 if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM) 1042 return (vaddr_t)thread_excp_vect; 1043 1044 switch (get_midr_primary_part(midr)) { 1045 #ifdef ARM32 1046 case CORTEX_A8_PART_NUM: 1047 case CORTEX_A9_PART_NUM: 1048 case CORTEX_A17_PART_NUM: 1049 #endif 1050 case CORTEX_A57_PART_NUM: 1051 case CORTEX_A72_PART_NUM: 1052 case CORTEX_A73_PART_NUM: 1053 case CORTEX_A75_PART_NUM: 1054 return select_vector((vaddr_t)thread_excp_vect_workaround); 1055 #ifdef ARM32 1056 case CORTEX_A15_PART_NUM: 1057 return select_vector((vaddr_t)thread_excp_vect_workaround_a15); 1058 #endif 1059 default: 1060 return (vaddr_t)thread_excp_vect; 1061 } 1062 #endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/ 1063 1064 return (vaddr_t)thread_excp_vect; 1065 } 1066 1067 void thread_init_per_cpu(void) 1068 { 1069 size_t pos = get_core_pos(); 1070 struct thread_core_local *l = thread_get_core_local(); 1071 1072 init_sec_mon(pos); 1073 1074 set_tmp_stack(l, 
GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS); 1075 set_abt_stack(l, GET_STACK(stack_abt[pos])); 1076 1077 thread_init_vbar(get_excp_vect()); 1078 1079 #ifdef CFG_FTRACE_SUPPORT 1080 /* 1081 * Enable accesses to frequency register and physical counter 1082 * register in EL0/PL0 required for timestamping during 1083 * function tracing. 1084 */ 1085 write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN); 1086 #endif 1087 } 1088 1089 struct thread_specific_data *thread_get_tsd(void) 1090 { 1091 return &threads[thread_get_id()].tsd; 1092 } 1093 1094 struct thread_ctx_regs *thread_get_ctx_regs(void) 1095 { 1096 struct thread_core_local *l = thread_get_core_local(); 1097 1098 assert(l->curr_thread != -1); 1099 return &threads[l->curr_thread].regs; 1100 } 1101 1102 void thread_set_foreign_intr(bool enable) 1103 { 1104 /* thread_get_core_local() requires foreign interrupts to be disabled */ 1105 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 1106 struct thread_core_local *l; 1107 1108 l = thread_get_core_local(); 1109 1110 assert(l->curr_thread != -1); 1111 1112 if (enable) { 1113 threads[l->curr_thread].flags |= 1114 THREAD_FLAGS_FOREIGN_INTR_ENABLE; 1115 thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 1116 } else { 1117 /* 1118 * No need to disable foreign interrupts here since they're 1119 * already disabled above. 1120 */ 1121 threads[l->curr_thread].flags &= 1122 ~THREAD_FLAGS_FOREIGN_INTR_ENABLE; 1123 } 1124 } 1125 1126 void thread_restore_foreign_intr(void) 1127 { 1128 /* thread_get_core_local() requires foreign interrupts to be disabled */ 1129 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 1130 struct thread_core_local *l; 1131 1132 l = thread_get_core_local(); 1133 1134 assert(l->curr_thread != -1); 1135 1136 if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE) 1137 thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 1138 } 1139 1140 #ifdef CFG_WITH_VFP 1141 uint32_t thread_kernel_enable_vfp(void) 1142 { 1143 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 1144 struct thread_ctx *thr = threads + thread_get_id(); 1145 struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp; 1146 1147 assert(!vfp_is_enabled()); 1148 1149 if (!thr->vfp_state.ns_saved) { 1150 vfp_lazy_save_state_final(&thr->vfp_state.ns, 1151 true /*force_save*/); 1152 thr->vfp_state.ns_saved = true; 1153 } else if (thr->vfp_state.sec_lazy_saved && 1154 !thr->vfp_state.sec_saved) { 1155 /* 1156 * This happens when we're handling an abort while the 1157 * thread was using the VFP state. 1158 */ 1159 vfp_lazy_save_state_final(&thr->vfp_state.sec, 1160 false /*!force_save*/); 1161 thr->vfp_state.sec_saved = true; 1162 } else if (tuv && tuv->lazy_saved && !tuv->saved) { 1163 /* 1164 * This can happen either during syscall or abort 1165 * processing (while processing a syscall). 
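* That is, the user mode (TA) VFP state has only been lazily claimed so
* far; it must be fully saved before the kernel starts using the VFP
* registers below.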
1166 */ 1167 vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/); 1168 tuv->saved = true; 1169 } 1170 1171 vfp_enable(); 1172 return exceptions; 1173 } 1174 1175 void thread_kernel_disable_vfp(uint32_t state) 1176 { 1177 uint32_t exceptions; 1178 1179 assert(vfp_is_enabled()); 1180 1181 vfp_disable(); 1182 exceptions = thread_get_exceptions(); 1183 assert(exceptions & THREAD_EXCP_FOREIGN_INTR); 1184 exceptions &= ~THREAD_EXCP_FOREIGN_INTR; 1185 exceptions |= state & THREAD_EXCP_FOREIGN_INTR; 1186 thread_set_exceptions(exceptions); 1187 } 1188 1189 void thread_kernel_save_vfp(void) 1190 { 1191 struct thread_ctx *thr = threads + thread_get_id(); 1192 1193 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 1194 if (vfp_is_enabled()) { 1195 vfp_lazy_save_state_init(&thr->vfp_state.sec); 1196 thr->vfp_state.sec_lazy_saved = true; 1197 } 1198 } 1199 1200 void thread_kernel_restore_vfp(void) 1201 { 1202 struct thread_ctx *thr = threads + thread_get_id(); 1203 1204 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 1205 assert(!vfp_is_enabled()); 1206 if (thr->vfp_state.sec_lazy_saved) { 1207 vfp_lazy_restore_state(&thr->vfp_state.sec, 1208 thr->vfp_state.sec_saved); 1209 thr->vfp_state.sec_saved = false; 1210 thr->vfp_state.sec_lazy_saved = false; 1211 } 1212 } 1213 1214 void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp) 1215 { 1216 struct thread_ctx *thr = threads + thread_get_id(); 1217 struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp; 1218 1219 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 1220 assert(!vfp_is_enabled()); 1221 1222 if (!thr->vfp_state.ns_saved) { 1223 vfp_lazy_save_state_final(&thr->vfp_state.ns, 1224 true /*force_save*/); 1225 thr->vfp_state.ns_saved = true; 1226 } else if (tuv && uvfp != tuv) { 1227 if (tuv->lazy_saved && !tuv->saved) { 1228 vfp_lazy_save_state_final(&tuv->vfp, 1229 false /*!force_save*/); 1230 tuv->saved = true; 1231 } 1232 } 1233 1234 if (uvfp->lazy_saved) 1235 vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved); 1236 uvfp->lazy_saved = false; 1237 uvfp->saved = false; 1238 1239 thr->vfp_state.uvfp = uvfp; 1240 vfp_enable(); 1241 } 1242 1243 void thread_user_save_vfp(void) 1244 { 1245 struct thread_ctx *thr = threads + thread_get_id(); 1246 struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp; 1247 1248 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 1249 if (!vfp_is_enabled()) 1250 return; 1251 1252 assert(tuv && !tuv->lazy_saved && !tuv->saved); 1253 vfp_lazy_save_state_init(&tuv->vfp); 1254 tuv->lazy_saved = true; 1255 } 1256 1257 void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp) 1258 { 1259 struct thread_ctx *thr = threads + thread_get_id(); 1260 1261 if (uvfp == thr->vfp_state.uvfp) 1262 thr->vfp_state.uvfp = NULL; 1263 uvfp->lazy_saved = false; 1264 uvfp->saved = false; 1265 } 1266 #endif /*CFG_WITH_VFP*/ 1267 1268 #ifdef ARM32 1269 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr) 1270 { 1271 uint32_t s; 1272 1273 if (!is_32bit) 1274 return false; 1275 1276 s = read_cpsr(); 1277 s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2); 1278 s |= CPSR_MODE_USR; 1279 if (entry_func & 1) 1280 s |= CPSR_T; 1281 *spsr = s; 1282 return true; 1283 } 1284 #endif 1285 1286 #ifdef ARM64 1287 static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr) 1288 { 1289 uint32_t s; 1290 1291 if (is_32bit) { 1292 s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT); 1293 s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT; 1294 s |= 
(entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT; 1295 } else { 1296 s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT); 1297 } 1298 1299 *spsr = s; 1300 return true; 1301 } 1302 #endif 1303 1304 static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0, 1305 unsigned long a1, unsigned long a2, unsigned long a3, 1306 unsigned long user_sp, unsigned long entry_func, 1307 uint32_t spsr) 1308 { 1309 /* 1310 * First clear all registers to avoid leaking information from 1311 * other TAs or even the Core itself. 1312 */ 1313 *regs = (struct thread_ctx_regs){ }; 1314 #ifdef ARM32 1315 regs->r0 = a0; 1316 regs->r1 = a1; 1317 regs->r2 = a2; 1318 regs->r3 = a3; 1319 regs->usr_sp = user_sp; 1320 regs->pc = entry_func; 1321 regs->cpsr = spsr; 1322 #endif 1323 #ifdef ARM64 1324 regs->x[0] = a0; 1325 regs->x[1] = a1; 1326 regs->x[2] = a2; 1327 regs->x[3] = a3; 1328 regs->sp = user_sp; 1329 regs->pc = entry_func; 1330 regs->cpsr = spsr; 1331 regs->x[13] = user_sp; /* Used when running TA in Aarch32 */ 1332 regs->sp = user_sp; /* Used when running TA in Aarch64 */ 1333 /* Set frame pointer (user stack can't be unwound past this point) */ 1334 regs->x[29] = 0; 1335 #endif 1336 } 1337 1338 uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1, 1339 unsigned long a2, unsigned long a3, unsigned long user_sp, 1340 unsigned long entry_func, bool is_32bit, 1341 uint32_t *exit_status0, uint32_t *exit_status1) 1342 { 1343 uint32_t spsr = 0; 1344 uint32_t exceptions = 0; 1345 uint32_t rc = 0; 1346 struct thread_ctx_regs *regs = NULL; 1347 1348 tee_ta_update_session_utime_resume(); 1349 1350 /* Derive SPSR from current CPSR/PSTATE readout. */ 1351 if (!get_spsr(is_32bit, entry_func, &spsr)) { 1352 *exit_status0 = 1; /* panic */ 1353 *exit_status1 = 0xbadbadba; 1354 return 0; 1355 } 1356 1357 exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 1358 /* 1359 * We're using the per thread location of saved context registers 1360 * for temporary storage. Now that exceptions are masked they will 1361 * not be used for any thing else until they are eventually 1362 * unmasked when user mode has been entered. 
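* __thread_enter_user_mode() below consumes these registers when it
* switches to user mode.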
1363 */ 1364 regs = thread_get_ctx_regs(); 1365 set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr); 1366 rc = __thread_enter_user_mode(regs, exit_status0, exit_status1); 1367 thread_unmask_exceptions(exceptions); 1368 return rc; 1369 } 1370 1371 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0 1372 void thread_get_user_kcode(struct mobj **mobj, size_t *offset, 1373 vaddr_t *va, size_t *sz) 1374 { 1375 core_mmu_get_user_va_range(va, NULL); 1376 *mobj = mobj_tee_ram; 1377 *offset = thread_user_kcode_va - VCORE_START_VA; 1378 *sz = thread_user_kcode_size; 1379 } 1380 #endif 1381 1382 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \ 1383 defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64) 1384 void thread_get_user_kdata(struct mobj **mobj, size_t *offset, 1385 vaddr_t *va, size_t *sz) 1386 { 1387 vaddr_t v; 1388 1389 core_mmu_get_user_va_range(&v, NULL); 1390 *va = v + thread_user_kcode_size; 1391 *mobj = mobj_tee_ram; 1392 *offset = (vaddr_t)thread_user_kdata_page - VCORE_START_VA; 1393 *sz = sizeof(thread_user_kdata_page); 1394 } 1395 #endif 1396 1397 static void setup_unwind_user_mode(struct thread_svc_regs *regs) 1398 { 1399 #ifdef ARM32 1400 regs->lr = (uintptr_t)thread_unwind_user_mode; 1401 regs->spsr = read_cpsr(); 1402 #endif 1403 #ifdef ARM64 1404 regs->elr = (uintptr_t)thread_unwind_user_mode; 1405 regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0); 1406 regs->spsr |= read_daif(); 1407 /* 1408 * regs is the value the stack pointer had when the SVC handler was 1409 * called; adding sizeof(*regs), i.e. regs + 1, matches the space 1410 * reserved at the beginning of el0_sync_svc(). This prepares the 1411 * stack for returning to thread_unwind_user_mode instead of a normal 1412 * exception return. 1413 */ 1414 regs->sp_el0 = (uint64_t)(regs + 1); 1415 #endif 1416 } 1417 1418 /* 1419 * Note: this function is weak just to make it possible to exclude it from 1420 * the unpaged area. 1421 */ 1422 void __weak thread_svc_handler(struct thread_svc_regs *regs) 1423 { 1424 struct tee_ta_session *sess = NULL; 1425 uint32_t state = 0; 1426 1427 /* Enable native interrupts */ 1428 state = thread_get_exceptions(); 1429 thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR); 1430 1431 thread_user_save_vfp(); 1432 1433 /* TA has just entered kernel mode */ 1434 tee_ta_update_session_utime_suspend(); 1435 1436 /* Restore foreign interrupts which are disabled on exception entry */ 1437 thread_restore_foreign_intr(); 1438 1439 tee_ta_get_current_session(&sess); 1440 assert(sess && sess->ctx->ops && sess->ctx->ops->handle_svc); 1441 if (sess->ctx->ops->handle_svc(regs)) { 1442 /* We're about to switch back to user mode */ 1443 tee_ta_update_session_utime_resume(); 1444 } else { 1445 /* We're returning from __thread_enter_user_mode() */ 1446 setup_unwind_user_mode(regs); 1447 } 1448 } 1449