// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <smccc.h>
#include <sm/sm.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(__clang__)
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		 sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)

#define GET_STACK(stack) \
	((vaddr_t)(stack) + STACK_SIZE(stack))
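
/*
 * Rough sketch of the stack layout produced by the macros above (derived
 * from DECLARE_STACK()/GET_STACK(); exact byte counts depend on
 * STACK_ALIGNMENT and STACK_CANARY_SIZE):
 *
 *	high addresses
 *	+-----------------------------------------+
 *	| end canary (last word of the array)      |
 *	| within STACK_CANARY_SIZE / 2 bytes of top |
 *	+-----------------------------------------+  <- GET_STACK(): initial SP
 *	| usable stack, grows towards low addresses|
 *	+-----------------------------------------+
 *	| start canary (first word of the array)   |
 *	+-----------------------------------------+
 *	low addresses
 *
 * STACK_SIZE() subtracts half of the canary allowance so that the end
 * canary stays above the initial stack pointer. For stack_tmp, an extra
 * STACK_TMP_OFFS bytes at the top are additionally reserved for the secure
 * monitor when OP-TEE provides its own monitor (see STACK_TMP_OFFS above
 * and set_tmp_stack() below). The canary words are written by
 * init_canaries() and verified by thread_check_canaries().
 */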

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, static);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

const void *stack_tmp_export __section(".identity_map.stack_tmp_export") =
	(uint8_t *)stack_tmp + sizeof(stack_tmp[0]) -
	(STACK_TMP_OFFS + STACK_CANARY_SIZE / 2);
const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence kept in pager sections.
 */
KEEP_PAGER(stack_tmp_export);
KEEP_PAGER(stack_tmp_stride);

thread_pm_handler_t thread_cpu_on_handler_ptr __nex_bss;
thread_pm_handler_t thread_cpu_off_handler_ptr __nex_bss;
thread_pm_handler_t thread_cpu_suspend_handler_ptr __nex_bss;
thread_pm_handler_t thread_cpu_resume_handler_ptr __nex_bss;
thread_pm_handler_t thread_system_off_handler_ptr __nex_bss;
thread_pm_handler_t thread_system_reset_handler_ptr __nex_bss;

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va __nex_bss;
long thread_user_kcode_offset __nex_bss;
static size_t thread_user_kcode_size __nex_bss;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset __nex_bss;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(thread_core_local), SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE)
#ifndef CFG_VIRTUALIZATION
	__section(".nozi.kdata_page");
#else
	__section(".nex_nozi.kdata_page");
#endif
#endif

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p",	\
		     #name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p", (void *)end_canary);			\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n) \
	do { \
		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
		panic(); \
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n);
		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n);
		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n);
	}
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n);
		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}
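
/*
 * The helpers below manipulate the exception mask bits named by the
 * THREAD_EXCP_* defines: on ARM32 they are shifted into the CPSR exception
 * mask bits (CPSR_F_SHIFT), on ARM64 into the DAIF bits (DAIF_F_SHIFT).
 * A typical critical section in this file looks roughly like this
 * (illustrative sketch only, the callers below are the reference):
 *
 *	uint32_t excp = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	... code that must not be migrated to another core ...
 *
 *	thread_unmask_exceptions(excp);
 */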

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}
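
/*
 * Rough overview of the non-secure (NS) VFP handling below, derived from
 * how the vfp_state fields are used in this file: when a thread is entered
 * from the normal world, thread_lazy_save_ns_vfp() only marks the NS VFP
 * context for lazy saving. The registers are actually saved the first time
 * secure code needs VFP (see thread_kernel_enable_vfp() and
 * thread_user_enable_vfp() further down). thread_lazy_restore_ns_vfp()
 * restores the NS context, if it was saved, before returning to the normal
 * world.
 */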

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with foreign interrupts masked,
	 * asynchronous aborts masked and native interrupts unmasked.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			     (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable Thumb mode if the entry address has the Thumb bit set */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into the context. The arguments will appear in
	 * r0-r3 when the thread is started; r4-r7 are cleared.
	 */
	thread->regs.r0 = a0;
	thread->regs.r1 = a1;
	thread->regs.r2 = a2;
	thread->regs.r3 = a3;
	thread->regs.r4 = 0;
	thread->regs.r5 = 0;
	thread->regs.r6 = 0;
	thread->regs.r7 = 0;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in EL1 (using SP_EL0) with foreign interrupts
	 * masked, asynchronous aborts masked and native interrupts
	 * unmasked.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into the context. The arguments will appear in
	 * x0-x3 when the thread is started; x4-x7 are cleared.
	 */
	thread->regs.x[0] = a0;
	thread->regs.x[1] = a1;
	thread->regs.x[2] = a2;
	thread->regs.x[3] = a3;
	thread->regs.x[4] = 0;
	thread->regs.x[5] = 0;
	thread->regs.x[6] = 0;
	thread->regs.x[7] = 0;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	thread_init_threads();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = -1;
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3);

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}
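
/*
 * Summary of the thread state transitions implemented in this file (this
 * restates the code, it is not an additional invariant): a free context is
 * claimed and set to THREAD_STATE_ACTIVE by thread_alloc_and_run() above,
 * or reactivated from THREAD_STATE_SUSPENDED by thread_resume_from_rpc()
 * below. A running thread goes back to THREAD_STATE_SUSPENDED via
 * thread_state_suspend() or to THREAD_STATE_FREE via thread_state_free().
 * All transitions are performed with the global thread lock held.
 */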

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update the return values from the RPC; they will appear in
	 * r0-r3 when the thread is resumed.
	 */
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update the return values from the RPC; they will appear in
	 * x0-x3 when the thread is resumed.
	 */
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return;

	if (s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct tee_ta_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (!s)
		return;

	if (s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}
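
/*
 * Note on the silent return in thread_resume_from_rpc() below: if the
 * thread ID passed from the normal world is out of range or does not refer
 * to a suspended thread, the function simply returns without resuming
 * anything. Reporting that failure back to the normal world is assumed to
 * be the job of the SMC entry code that called us (it lives outside this
 * file).
 */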

void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * A return from an RPC that requested service of a foreign
	 * interrupt must not take parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == -1)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

#ifdef ARM64
bool thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/* If any bit in l->flags is set we're handling some exception. */
	ret = !l->flags;
	thread_unmask_exceptions(exceptions);

	return ret;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

#ifdef CFG_VIRTUALIZATION
	virt_unset_guest();
#endif
	thread_unlock_global();
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

#ifdef CFG_VIRTUALIZATION
	virt_unset_guest();
#endif

	thread_unlock_global();

	return ct;
}
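
/*
 * The thread ID returned by thread_state_suspend() above is the value that
 * is expected to come back from the normal world later and be passed to
 * thread_resume_from_rpc(), which uses it to locate the suspended context
 * (see the state check there). How the ID travels to and from the normal
 * world is handled by callers outside this file.
 */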

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

int thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

int thread_get_id(void)
{
	int ct = thread_get_id_may_fail();

	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Hand any physical pages mapped in this range to the pager */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the area to the pager */
		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					PAGER_AREA_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* Initialize the effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/
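
/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0 most core mappings are removed while user
 * mode runs. init_user_kcode() below records the range that must remain
 * mapped: the exception vector code between thread_excp_vect and
 * thread_excp_vect_end, rounded out to CORE_MMU_USER_CODE_SIZE.
 * thread_user_kcode_offset is the distance between that code's kernel VA
 * and the start of the user VA range where it is aliased (consumed by
 * thread_get_user_kcode() near the end of this file).
 */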

static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	/*
	 * When transitioning to EL0, subtract this much from SP to make it
	 * point at this special kdata page instead. SP is restored by
	 * adding the same amount when transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_threads(void)
{
	size_t n;

	init_thread_stacks();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		thread_core_local[n].curr_thread = -1;
}

void thread_init_primary(const struct thread_handlers *handlers)
{
	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_user_kcode();
}

static void init_sec_mon(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_tmp[pos]));
#endif
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

#ifdef ARM64
static bool probe_workaround_available(void)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001) /* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0);
	return r >= 0;
}

static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	if (probe_workaround_available()) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return a;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	return a;
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;

	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	case CORTEX_A57_PART_NUM:
	case CORTEX_A72_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround);
#ifdef ARM32
	case CORTEX_A15_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround_a15);
#endif
	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar(get_excp_vect());

#ifdef CFG_FTRACE_SUPPORT
	/*
	 * Enable EL0/PL0 access to the frequency register and the physical
	 * counter register, which is needed for timestamping during
	 * function tracing.
	 */
	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
#endif
}

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |=
			THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
			~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}
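
/*
 * Expected pairing of the kernel mode VFP helpers below (an illustrative
 * sketch based on their signatures, actual callers live elsewhere):
 *
 *	uint32_t vfp_excp = thread_kernel_enable_vfp();
 *
 *	... use VFP/SIMD registers in kernel mode ...
 *
 *	thread_kernel_disable_vfp(vfp_excp);
 *
 * thread_kernel_enable_vfp() masks foreign interrupts and returns the
 * previous exception mask; thread_kernel_disable_vfp() restores the foreign
 * interrupt bit from that value.
 */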

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec,
					  false /*!force_save*/);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen during syscall processing, or during
		 * abort processing while a syscall is being processed.
		 */
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp,
						  false /*!force_save*/);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/
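
/*
 * get_spsr() builds the SPSR/PSTATE value used when entering user mode:
 * ARM32 selects USR mode and sets the Thumb bit from bit 0 of the entry
 * address, ARM64 selects either AArch32 user mode or AArch64 EL0 and keeps
 * the current interrupt/abort mask bits. Returning false means the
 * requested mode cannot be provided (a 64-bit TA on a 32-bit core).
 */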

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_cpsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 uint32_t spsr)
{
	/*
	 * First clear all registers to avoid leaking information from
	 * other TAs or even the Core itself.
	 */
	*regs = (struct thread_ctx_regs){ };
#ifdef ARM32
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
	regs->usr_sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
#endif
#ifdef ARM64
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
	regs->sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
	regs->x[13] = user_sp;	/* Used when running TA in Aarch32 */
	regs->sp = user_sp;	/* Used when running TA in Aarch64 */
	/* Set frame pointer (user stack can't be unwound past this point) */
	regs->x[29] = 0;
#endif
}
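
/*
 * thread_enter_user_mode() below hands control to user mode (a TA) through
 * __thread_enter_user_mode() in assembly. Control comes back here when that
 * call returns, which the code at the end of this file arranges via
 * thread_unwind_user_mode() when an SVC handler decides not to go back to
 * user mode; exit_status0/exit_status1 carry the result to the caller. The
 * early-return path where get_spsr() fails signals a panic by setting
 * *exit_status0 to 1.
 */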

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	/* Derive SPSR from current CPSR/PSTATE readout. */
	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	/*
	 * We're using the per-thread location of the saved context
	 * registers as temporary storage. Now that exceptions are masked
	 * they will not be used for anything else until they are
	 * eventually unmasked when user mode has been entered.
	 */
	regs = thread_get_ctx_regs();
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);
	return rc;
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram;
	*offset = thread_user_kcode_va - VCORE_START_VA;
	*sz = thread_user_kcode_size;
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram;
	*offset = (vaddr_t)thread_user_kdata_page - VCORE_START_VA;
	*sz = sizeof(thread_user_kdata_page);
}
#endif

static void setup_unwind_user_mode(struct thread_svc_regs *regs)
{
#ifdef ARM32
	regs->lr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = read_cpsr();
#endif
#ifdef ARM64
	regs->elr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);
	regs->spsr |= read_daif();
	/*
	 * regs points at the struct thread_svc_regs that was saved on the
	 * stack when the SVC was taken, so regs + 1 matches the stack
	 * pointer value for the space reserved at the beginning of
	 * el0_sync_svc(). This prepares the stack for returning to
	 * thread_unwind_user_mode instead of doing a normal exception
	 * return.
	 */
	regs->sp_el0 = (uint64_t)(regs + 1);
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak thread_svc_handler(struct thread_svc_regs *regs)
{
	struct tee_ta_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	/* TA has just entered kernel mode */
	tee_ta_update_session_utime_suspend();

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	tee_ta_get_current_session(&sess);
	assert(sess && sess->ctx->ops && sess->ctx->ops->handle_svc);
	if (sess->ctx->ops->handle_svc(regs)) {
		/* We're about to switch back to user mode */
		tee_ta_update_session_utime_resume();
	} else {
		/* We're returning from __thread_enter_user_mode() */
		setup_unwind_user_mode(regs);
	}
}