// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <smccc.h>
#include <sm/sm.h>
#include <trace.h>
#include <util.h>

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va __nex_bss;
long thread_user_kcode_offset __nex_bss;
static size_t thread_user_kcode_size __nex_bss;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset __nex_bss;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE,
		SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE)
#ifndef CFG_VIRTUALIZATION
	__section(".nozi.kdata_page");
#else
	__section(".nex_nozi.kdata_page");
#endif
#endif

#ifdef ARM32
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);

	barrier();
	write_cpsr(cpsr);
	barrier();
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);

	barrier();
	write_daif(daif);
	barrier();
}
#endif /*ARM64*/

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

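/*
 * Counterpart of thread_lazy_save_ns_vfp(): if a user mode (TA) VFP
 * context is still only lazily saved it is flushed to memory first, then
 * the non-secure VFP context is restored for the normal world.
 */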
static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint32_t)pc;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			     (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = a0;
	thread->regs.r1 = a1;
	thread->regs.r2 = a2;
	thread->regs.r3 = a3;
	thread->regs.r4 = a4;
	thread->regs.r5 = a5;
	thread->regs.r6 = a6;
	thread->regs.r7 = a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint64_t)pc;

	/*
	 * Stdcalls start in EL1 with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = a0;
	thread->regs.x[1] = a1;
	thread->regs.x[2] = a2;
	thread->regs.x[3] = a3;
	thread->regs.x[4] = a4;
	thread->regs.x[5] = a5;
	thread->regs.x[6] = a6;
	thread->regs.x[7] = a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_smc_entry);
}

#ifdef CFG_SECURE_PARTITION
void thread_sp_alloc_and_run(struct thread_smc_args *args __maybe_unused)
{
	__thread_alloc_and_run(args->a0, args->a1, args->a2, args->a3, args->a4,
			       args->a5, args->a6, args->a7,
			       spmc_sp_thread_entry);
}
#endif

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when thread is resumed.
	 */
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x3 when thread is resumed.
	 */
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

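/*
 * Resume a thread that was suspended while waiting for an RPC to be
 * serviced by the normal world. thread_id and a0-a3 are supplied by the
 * non-secure caller, so the thread id is validated against the thread
 * table and the arguments are only copied back when the thread asked for
 * them (THREAD_FLAGS_COPY_ARGS_ON_RETURN).
 */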
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * A return from an RPC issued to request service of a foreign
	 * interrupt must not take parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

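/*
 * With the pager enabled, hand back the physical pages backing the unused
 * part of the kernel stack, that is, everything below the current stack
 * pointer. Without the pager this is a no-op.
 */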
#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

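/*
 * Record the virtual address range of the exception vector code that must
 * remain mapped while executing at EL0 when CFG_CORE_UNMAP_CORE_AT_EL0 is
 * enabled, together with the offset used to remap it into the user VA
 * range.
 */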
static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	/*
	 * When transitioning to EL0, subtract this much from SP to point at
	 * this special kdata page instead. SP is restored by adding the
	 * same amount when transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

#ifdef ARM64
static bool probe_workaround_available(void)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001)	/* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0);
	return r >= 0;
}

static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	if (probe_workaround_available()) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return a;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	return a;
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;

	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	case CORTEX_A57_PART_NUM:
	case CORTEX_A72_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround);
#ifdef ARM32
	case CORTEX_A15_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround_a15);
#endif
	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

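/*
 * Per-CPU initialization: on ARM32, set up the secure monitor (when OP-TEE
 * is not running under ARM Trusted Firmware) and the IRQ/FIQ/abort/undef
 * stack pointers, then install the exception vector selected by
 * get_excp_vect().
 */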
void thread_init_per_cpu(void)
{
#ifdef ARM32
	struct thread_core_local *l = thread_get_core_local();

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(l->tmp_stack_va_end + STACK_TMP_OFFS);
#endif
	thread_set_irq_sp(l->tmp_stack_va_end);
	thread_set_fiq_sp(l->tmp_stack_va_end);
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
#endif

	thread_init_vbar(get_excp_vect());

#ifdef CFG_FTRACE_SUPPORT
	/*
	 * Enable accesses to frequency register and physical counter
	 * register in EL0/PL0 required for timestamping during
	 * function tracing.
	 */
	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
#endif
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec,
					  false /*!force_save*/);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp,
						  false /*!force_save*/);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

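/*
 * Forget any association between the current thread and the VFP state of
 * uctx and reset its lazy_saved/saved flags.
 */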
void thread_user_clear_vfp(struct user_mode_ctx *uctx)
{
	struct thread_user_vfp_state *uvfp = &uctx->vfp;
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_cpsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 uint32_t spsr,
			 struct thread_pauth_keys *keys __maybe_unused)
{
	/*
	 * First clear all registers to avoid leaking information from
	 * other TAs or even the Core itself.
	 */
	*regs = (struct thread_ctx_regs){ };
#ifdef ARM32
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
	regs->usr_sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
#endif
#ifdef ARM64
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
	regs->sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
	regs->x[13] = user_sp;	/* Used when running TA in AArch32 */
	regs->sp = user_sp;	/* Used when running TA in AArch64 */
#ifdef CFG_TA_PAUTH
	assert(keys);
	regs->apiakey_hi = keys->hi;
	regs->apiakey_lo = keys->lo;
#endif
	/* Set frame pointer (user stack can't be unwound past this point) */
	regs->x[29] = 0;
#endif
}

static struct thread_pauth_keys *thread_get_pauth_keys(void)
{
#if defined(CFG_TA_PAUTH)
	struct ts_session *s = ts_get_current_session();
	/* Only user TAs support the PAUTH keys */
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);

	return &utc->uctx.keys;
#else
	return NULL;
#endif
}

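/*
 * Build the SPSR for the requested execution state, populate a clean
 * register context and switch to user mode at entry_func with a0-a3 as
 * arguments and user_sp as stack pointer. Exceptions are masked while the
 * per-thread register area is set up and are unmasked again once
 * __thread_enter_user_mode() returns.
 */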
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;
	struct thread_pauth_keys *keys = NULL;

	tee_ta_update_session_utime_resume();

	keys = thread_get_pauth_keys();

	/* Derive SPSR from current CPSR/PSTATE readout. */
	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	/*
	 * We're using the per-thread location of saved context registers
	 * for temporary storage. Now that exceptions are masked they will
	 * not be used for anything else until they are eventually
	 * unmasked when user mode has been entered.
	 */
	regs = thread_get_ctx_regs();
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr, keys);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);
	return rc;
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram_rx;
	*sz = thread_user_kcode_size;
	*offset = thread_user_kcode_va - (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram_rw;
	*sz = sizeof(thread_user_kdata_page);
	*offset = (vaddr_t)thread_user_kdata_page -
		  (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

static void setup_unwind_user_mode(struct thread_svc_regs *regs)
{
#ifdef ARM32
	regs->lr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = read_cpsr();
#endif
#ifdef ARM64
	regs->elr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);
	regs->spsr |= read_daif();
	/*
	 * Regs is the value of the stack pointer before calling the SVC
	 * handler. The addition (regs + 1) matches the space reserved at
	 * the beginning of el0_sync_svc(). This prepares the stack for
	 * returning to thread_unwind_user_mode instead of doing a normal
	 * exception return.
	 */
	regs->sp_el0 = (uint64_t)(regs + 1);
#endif
}

static void gprof_set_status(struct ts_session *s __maybe_unused,
			     enum ts_gprof_status status __maybe_unused)
{
#ifdef CFG_TA_GPROF_SUPPORT
	if (s->ctx->ops->gprof_set_status)
		s->ctx->ops->gprof_set_status(status);
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak thread_svc_handler(struct thread_svc_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();
	/*
	 * User mode service has just entered kernel mode, suspend gprof
	 * collection until we're about to switch back again.
	 */
	gprof_set_status(sess, TS_GPROF_SUSPEND);

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_svc);
	if (sess->handle_svc(regs)) {
		/* We're about to switch back to user mode */
		gprof_set_status(sess, TS_GPROF_RESUME);
	} else {
		/* We're returning from __thread_enter_user_mode() */
		setup_unwind_user_mode(regs);
	}
}

#ifdef CFG_WITH_ARM_TRUSTED_FW
/*
 * These five functions are __weak to allow platforms to override them if
 * needed.
 */
unsigned long __weak thread_cpu_off_handler(unsigned long a0 __unused,
					    unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_off_handler);

unsigned long __weak thread_cpu_suspend_handler(unsigned long a0 __unused,
						unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_suspend_handler);

unsigned long __weak thread_cpu_resume_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_resume_handler);

unsigned long __weak thread_system_off_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_off_handler);

unsigned long __weak thread_system_reset_handler(unsigned long a0 __unused,
						 unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_reset_handler);
#endif /*CFG_WITH_ARM_TRUSTED_FW*/