// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <smccc.h>
#include <sm/sm.h>
#include <trace.h>
#include <util.h>

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va __nex_bss;
long thread_user_kcode_offset __nex_bss;
static size_t thread_user_kcode_size __nex_bss;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset __nex_bss;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE,
		SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE)
#ifndef CFG_VIRTUALIZATION
	__section(".nozi.kdata_page");
#else
	__section(".nex_nozi.kdata_page");
#endif
#endif

#ifdef ARM32
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);

	barrier();
	write_cpsr(cpsr);
	barrier();
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);

	barrier();
	write_daif(daif);
	barrier();
}
#endif /*ARM64*/

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

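/*
 * Restore the non-secure VFP context that thread_lazy_save_ns_vfp() started
 * saving. If a user mode VFP context was lazily saved for this thread it is
 * committed to memory first.
 */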
static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint32_t)pc;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous abort and unmasked native interrupts.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			     (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = a0;
	thread->regs.r1 = a1;
	thread->regs.r2 = a2;
	thread->regs.r3 = a3;
	thread->regs.r4 = a4;
	thread->regs.r5 = a5;
	thread->regs.r6 = a6;
	thread->regs.r7 = a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint64_t)pc;

	/*
	 * Stdcalls start at EL1 with masked foreign interrupts, masked
	 * asynchronous abort and unmasked native interrupts.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = a0;
	thread->regs.x[1] = a1;
	thread->regs.x[2] = a2;
	thread->regs.x[3] = a3;
	thread->regs.x[4] = a4;
	thread->regs.x[5] = a5;
	thread->regs.x[6] = a6;
	thread->regs.x[7] = a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

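/*
 * Find a free thread context, mark it active, initialize its registers with
 * the supplied arguments and entry point, and resume execution in it.
 * Returns to the caller only if no free thread was available.
 */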
static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_smc_entry);
}

#ifdef CFG_SECURE_PARTITION
void thread_sp_alloc_and_run(struct thread_smc_args *args __maybe_unused)
{
	__thread_alloc_and_run(args->a0, args->a1, args->a2, args->a3, args->a4,
			       args->a5, args->a6, args->a7,
			       spmc_sp_thread_entry);
}
#endif

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update the return values from the RPC; they will appear in
	 * r0-r3 when the thread is resumed.
	 */
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update the return values from the RPC; they will appear in
	 * x0-x3 when the thread is resumed.
	 */
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
}
#endif /*ARM64*/

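/*
 * Tell whether the saved status register value comes from user mode:
 * AArch32 USR mode or AArch64 EL0 (including 32-bit user mode).
 */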
#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

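/*
 * Resume a thread that was suspended while waiting for an RPC to complete.
 * The return arguments a0-a3 are copied into the thread's register context
 * only when THREAD_FLAGS_COPY_ARGS_ON_RETURN is set. Returns to the caller
 * only if thread_id does not refer to a suspended thread.
 */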
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * A return from an RPC that requested service of a foreign interrupt
	 * must not take parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

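/*
 * Suspend the currently running thread: record the resume state (flags,
 * cpsr and pc), stash the user mode mapping if one is active and release
 * the core. Returns the id of the suspended thread so it can later be
 * resumed.
 */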
int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

static void set_core_local_kcode_offset(struct thread_core_local *cls,
					long offset)
{
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		cls[n].kcode_offset = offset;
}

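/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0 the exception vector must remain mapped
 * while in user mode. Compute its aligned virtual address and size and its
 * offset from the start of the user VA range, and record the offset in each
 * core's thread_core_local.
 */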
static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

	set_core_local_kcode_offset(thread_core_local,
				    thread_user_kcode_offset);
#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	set_core_local_kcode_offset((void *)thread_user_kdata_page,
				    thread_user_kcode_offset);
	/*
	 * When transitioning to EL0, subtract this much from SP to point at
	 * this special kdata page instead. SP is restored by adding the same
	 * amount while transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

#ifdef ARM64
static bool probe_workaround_available(void)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001)	/* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0);
	return r >= 0;
}

static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	if (probe_workaround_available()) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return a;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	return a;
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;

	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	case CORTEX_A57_PART_NUM:
	case CORTEX_A72_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround);
#ifdef ARM32
	case CORTEX_A15_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround_a15);
#endif
	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

void thread_init_per_cpu(void)
{
#ifdef ARM32
	struct thread_core_local *l = thread_get_core_local();

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(l->tmp_stack_va_end + STACK_TMP_OFFS);
#endif
	thread_set_irq_sp(l->tmp_stack_va_end);
	thread_set_fiq_sp(l->tmp_stack_va_end);
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
#endif

	thread_init_vbar(get_excp_vect());

#ifdef CFG_FTRACE_SUPPORT
	/*
	 * Enable accesses to frequency register and physical counter
	 * register in EL0/PL0 required for timestamping during
	 * function tracing.
	 */
	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
#endif
}

#ifdef CFG_WITH_VFP
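/*
 * Enable VFP for use by the core itself. Any pending lazily saved VFP
 * context (normal world, secure kernel or user mode) is committed to memory
 * first since the registers may be clobbered. Returns the previous exception
 * mask, to be passed to thread_kernel_disable_vfp().
 */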
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec,
					  false /*!force_save*/);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp,
						  false /*!force_save*/);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

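/*
 * Detach the given user mode VFP context from the current thread and mark
 * it as holding no saved state.
 */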
void thread_user_clear_vfp(struct user_mode_ctx *uctx)
{
	struct thread_user_vfp_state *uvfp = &uctx->vfp;
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_cpsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 uint32_t spsr,
			 struct thread_pauth_keys *keys __maybe_unused)
{
	/*
	 * First clear all registers to avoid leaking information from
	 * other TAs or even the Core itself.
	 */
	*regs = (struct thread_ctx_regs){ };
#ifdef ARM32
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
	regs->usr_sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
#endif
#ifdef ARM64
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
	regs->sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
	regs->x[13] = user_sp;	/* Used when running TA in AArch32 */
	regs->sp = user_sp;	/* Used when running TA in AArch64 */
#ifdef CFG_TA_PAUTH
	assert(keys);
	regs->apiakey_hi = keys->hi;
	regs->apiakey_lo = keys->lo;
#endif
	/* Set frame pointer (user stack can't be unwound past this point) */
	regs->x[29] = 0;
#endif
}

static struct thread_pauth_keys *thread_get_pauth_keys(void)
{
#if defined(CFG_TA_PAUTH)
	struct ts_session *s = ts_get_current_session();
	/* Only user TAs support the PAUTH keys */
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);

	return &utc->uctx.keys;
#else
	return NULL;
#endif
}

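/*
 * Switch the current thread to user mode at entry_func with arguments a0-a3
 * and user_sp as stack pointer. All exceptions are masked while the register
 * context is prepared and the function returns only after the user mode
 * execution has ended.
 */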
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;
	struct thread_pauth_keys *keys = NULL;

	tee_ta_update_session_utime_resume();

	keys = thread_get_pauth_keys();

	/* Derive SPSR from current CPSR/PSTATE readout. */
	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	/*
	 * We're using the per thread location of saved context registers
	 * for temporary storage. Now that exceptions are masked they will
	 * not be used for anything else until they are eventually
	 * unmasked when user mode has been entered.
	 */
	regs = thread_get_ctx_regs();
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr, keys);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);
	return rc;
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram_rx;
	*sz = thread_user_kcode_size;
	*offset = thread_user_kcode_va - (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram_rw;
	*sz = sizeof(thread_user_kdata_page);
	*offset = (vaddr_t)thread_user_kdata_page -
		  (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

static void setup_unwind_user_mode(struct thread_svc_regs *regs)
{
#ifdef ARM32
	regs->lr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = read_cpsr();
#endif
#ifdef ARM64
	regs->elr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);
	regs->spsr |= read_daif();
	/*
	 * Regs is the value of the stack pointer before calling the SVC
	 * handler. The addition matches the reserved space at the
	 * beginning of el0_sync_svc(). This prepares the stack when
	 * returning to thread_unwind_user_mode instead of a normal
	 * exception return.
	 */
	regs->sp_el0 = (uint64_t)(regs + 1);
#endif
}

static void gprof_set_status(struct ts_session *s __maybe_unused,
			     enum ts_gprof_status status __maybe_unused)
{
#ifdef CFG_TA_GPROF_SUPPORT
	if (s->ctx->ops->gprof_set_status)
		s->ctx->ops->gprof_set_status(status);
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak thread_svc_handler(struct thread_svc_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();
	/*
	 * User mode service has just entered kernel mode, suspend gprof
	 * collection until we're about to switch back again.
	 */
	gprof_set_status(sess, TS_GPROF_SUSPEND);

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_svc);
	if (sess->handle_svc(regs)) {
		/* We're about to switch back to user mode */
		gprof_set_status(sess, TS_GPROF_RESUME);
	} else {
		/* We're returning from __thread_enter_user_mode() */
		setup_unwind_user_mode(regs);
	}
}

#ifdef CFG_WITH_ARM_TRUSTED_FW
/*
 * These five functions are __weak to allow platforms to override them if
 * needed.
 */
unsigned long __weak thread_cpu_off_handler(unsigned long a0 __unused,
					    unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_off_handler);

unsigned long __weak thread_cpu_suspend_handler(unsigned long a0 __unused,
						unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_suspend_handler);

unsigned long __weak thread_cpu_resume_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_resume_handler);

unsigned long __weak thread_system_off_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_off_handler);

unsigned long __weak thread_system_reset_handler(unsigned long a0 __unused,
						 unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_reset_handler);
#endif /*CFG_WITH_ARM_TRUSTED_FW*/