// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/interrupt.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <smccc.h>
#include <sm/sm.h>
#include <trace.h>
#include <util.h>

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va __nex_bss;
long thread_user_kcode_offset __nex_bss;
static size_t thread_user_kcode_size __nex_bss;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset __nex_bss;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE,
		SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE)
#ifndef CFG_NS_VIRTUALIZATION
	__section(".nozi.kdata_page");
#else
	__section(".nex_nozi.kdata_page");
#endif
#endif

#ifdef ARM32
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);

	barrier();
	write_cpsr(cpsr);
	barrier();
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);

	barrier();
	write_daif(daif);
	barrier();
}
#endif /*ARM64*/

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

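/*
 * Restore the non-secure VFP context that thread_lazy_save_ns_vfp()
 * started to save. If a user mode VFP context has a pending lazy save it
 * is finalized first so that the non-secure registers can be reloaded
 * without losing the user mode state.
 */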
static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint32_t)pc;

	/*
	 * Stdcalls start in SVC mode with foreign interrupts masked,
	 * asynchronous aborts masked and native interrupts unmasked.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			     (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = a0;
	thread->regs.r1 = a1;
	thread->regs.r2 = a2;
	thread->regs.r3 = a3;
	thread->regs.r4 = a4;
	thread->regs.r5 = a5;
	thread->regs.r6 = a6;
	thread->regs.r7 = a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint64_t)pc;

	/*
	 * Stdcalls start at EL1 with foreign interrupts masked,
	 * asynchronous aborts masked and native interrupts unmasked.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = a0;
	thread->regs.x[1] = a1;
	thread->regs.x[2] = a2;
	thread->regs.x[3] = a3;
	thread->regs.x[4] = a4;
	thread->regs.x[5] = a5;
	thread->regs.x[6] = a6;
	thread->regs.x[7] = a7;

	/* Set up the frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

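/*
 * Grab a free entry in threads[], initialize its register context with the
 * supplied arguments and entry point and switch to it with thread_resume().
 * If no free thread is found the function returns to the caller, otherwise
 * it does not return.
 */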
static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc, uint32_t flags)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = flags;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);
#ifdef CFG_CORE_PAUTH
	/*
	 * Copy the APIA key into the registers to be restored with
	 * thread_resume().
	 */
	threads[n].regs.apiakey_hi = threads[n].keys.apia_hi;
	threads[n].regs.apiakey_lo = threads[n].keys.apia_lo;
#endif

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_smc_entry, 0);
}

#ifdef CFG_SECURE_PARTITION
void thread_sp_alloc_and_run(struct thread_smc_args *args __maybe_unused)
{
	__thread_alloc_and_run(args->a0, args->a1, args->a2, args->a3, args->a4,
			       args->a5, args->a6, args->a7,
			       spmc_sp_thread_entry, THREAD_FLAGS_FFA_ONLY);
}
#endif

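/*
 * Helpers used by thread_resume_from_rpc() to pass the values returned by
 * the normal world back to a resumed thread.
 */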
#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update the values returned from the RPC; they will appear in
	 * r0-r3 when the thread is resumed.
	 */
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update the values returned from the RPC; they will appear in
	 * x0-x3 when the thread is resumed.
	 */
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

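/*
 * Resume the suspended thread identified by thread_id after an RPC has been
 * served by the normal world. The thread is marked active, its user mapping
 * and VFP state are restored and, if THREAD_FLAGS_COPY_ARGS_ON_RETURN is
 * set, the values a0-a3 returned by the normal world are copied into its
 * saved registers. If thread_id does not refer to a suspended thread the
 * function simply returns.
 */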
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * Return from RPC to request service of a foreign interrupt must not
	 * get parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

#ifdef ARM64
static uint64_t spsr_from_pstate(void)
{
	uint64_t spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);

	spsr |= read_daif();
	if (IS_ENABLED(CFG_PAN) && feat_pan_implemented() && read_pan())
		spsr |= SPSR_64_PAN;

	return spsr;
}

void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
{
	thread_rpc_spsr(rv, spsr_from_pstate());
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

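/*
 * Suspend the currently running thread: record its register state at the
 * suspend point (cpsr/pc plus the supplied flags), detach its user mapping
 * from this core and mark it THREAD_STATE_SUSPENDED. Returns the thread id
 * so the thread can later be resumed with thread_resume_from_rpc().
 */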
int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		struct ts_session *ts_sess =
			TAILQ_FIRST(&threads[ct].tsd.sess_stack);

		spmc_sp_set_to_preempted(ts_sess);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

static void __maybe_unused
set_core_local_kcode_offset(struct thread_core_local *cls, long offset)
{
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		cls[n].kcode_offset = offset;
}

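/*
 * With CFG_CORE_UNMAP_CORE_AT_EL0 only the exception vector (and, with the
 * Spectre workaround on ARM64, the kdata page) remains mapped while user
 * mode runs. init_user_kcode() records the CORE_MMU_USER_CODE_SIZE aligned
 * range covering thread_excp_vect and the offsets needed to relocate it
 * into the user VA range.
 */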
static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

	set_core_local_kcode_offset(thread_core_local,
				    thread_user_kcode_offset);
#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	set_core_local_kcode_offset((void *)thread_user_kdata_page,
				    thread_user_kcode_offset);
	/*
	 * When transitioning to EL0, subtract this much from SP to make it
	 * point to this special kdata page instead. SP is restored by
	 * adding the same amount when transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

static uint32_t __maybe_unused get_midr_variant(uint32_t midr)
{
	return (midr >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK;
}

static uint32_t __maybe_unused get_midr_revision(uint32_t midr)
{
	return (midr >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK;
}

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
#ifdef ARM64
static bool probe_workaround_available(uint32_t wa_id)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001)	/* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, wa_id, 0, 0);
	return r >= 0;
}

static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
{
	if (probe_workaround_available(SMCCC_ARCH_WORKAROUND_1)) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return (vaddr_t)thread_excp_vect_wa_spectre_v2;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
{
	return (vaddr_t)thread_excp_vect_wa_spectre_v2;
}
#endif
#endif

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
static vaddr_t select_vector_wa_spectre_bhb(uint8_t loop_count __maybe_unused)
{
	/*
	 * Spectre-BHB has only been analyzed for AArch64 so far. For
	 * AArch32, fall back to the Spectre-V2 workaround, which is likely
	 * to work even if it is somewhat more expensive than a more
	 * optimized workaround.
	 */
#ifdef ARM64
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	struct thread_core_local *cl = (void *)thread_user_kdata_page;

	cl[get_core_pos()].bhb_loop_count = loop_count;
#endif
	thread_get_core_local()->bhb_loop_count = loop_count;

	DMSG("Spectre-BHB CVE-2022-23960 workaround enabled with \"K\" = %u",
	     loop_count);

	return (vaddr_t)thread_excp_vect_wa_spectre_bhb;
#else
	return select_vector_wa_spectre_v2();
#endif
}
#endif

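/*
 * Select the exception vector for this core. Without
 * CFG_CORE_WORKAROUND_SPECTRE_BP_SEC the plain vector is used; otherwise
 * the MIDR is inspected to pick the Spectre-V2 or Spectre-BHB hardened
 * variant matching the CPU.
 */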
static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();
	uint8_t vers = 0;

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;
	/*
	 * Variant rx, Revision py, for instance
	 * Variant 2 Revision 0 = r2p0 = 0x20
	 */
	vers = (get_midr_variant(midr) << 4) | get_midr_revision(midr);

	/*
	 * The Spectre-V2 (CVE-2017-5715) software workaround also covers
	 * what's needed for Spectre-BHB (CVE-2022-23960). The workaround
	 * for Spectre-V2 is more expensive than the one for Spectre-BHB,
	 * so select the Spectre-BHB workaround where possible.
	 */
	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	/* Spectre-V2 */
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	/* Spectre-V2 */
	case CORTEX_A57_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector_wa_spectre_v2();
#ifdef ARM32
	/* Spectre-V2 */
	case CORTEX_A15_PART_NUM:
		return (vaddr_t)thread_excp_vect_wa_a15_spectre_v2;
#endif
	/*
	 * Spectre-V2 for vers < r1p0
	 * Spectre-BHB for vers >= r1p0
	 */
	case CORTEX_A72_PART_NUM:
		if (vers < 0x10)
			return select_vector_wa_spectre_v2();
		return select_vector_wa_spectre_bhb(8);

	/*
	 * Use the safer but more expensive Spectre-V2 workaround for CPUs
	 * where the best mitigation sequence is still being researched.
	 */
	case CORTEX_A65_PART_NUM:
	case CORTEX_A65AE_PART_NUM:
	case NEOVERSE_E1_PART_NUM:
		return select_vector_wa_spectre_v2();

	/* Spectre-BHB */
	case CORTEX_A76_PART_NUM:
	case CORTEX_A76AE_PART_NUM:
	case CORTEX_A77_PART_NUM:
		return select_vector_wa_spectre_bhb(24);
	case CORTEX_A78_PART_NUM:
	case CORTEX_A78AE_PART_NUM:
	case CORTEX_A78C_PART_NUM:
	case CORTEX_A710_PART_NUM:
	case CORTEX_X1_PART_NUM:
	case CORTEX_X2_PART_NUM:
		return select_vector_wa_spectre_bhb(32);
	case NEOVERSE_N1_PART_NUM:
		return select_vector_wa_spectre_bhb(24);
	case NEOVERSE_N2_PART_NUM:
	case NEOVERSE_V1_PART_NUM:
		return select_vector_wa_spectre_bhb(32);

	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

void thread_init_per_cpu(void)
{
#ifdef ARM32
	struct thread_core_local *l = thread_get_core_local();

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(l->tmp_stack_va_end + STACK_TMP_OFFS);
#endif
	thread_set_irq_sp(l->tmp_stack_va_end);
	thread_set_fiq_sp(l->tmp_stack_va_end);
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
#endif

	thread_init_vbar(get_excp_vect());

#ifdef CFG_FTRACE_SUPPORT
	/*
	 * Enable accesses to frequency register and physical counter
	 * register in EL0/PL0 required for timestamping during
	 * function tracing.
	 */
	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
#endif
}

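/*
 * Kernel mode VFP use follows a bracketed pattern; a minimal sketch of a
 * caller (illustrative only, not a call site in this file):
 *
 *	uint32_t vfp_state = thread_kernel_enable_vfp();
 *
 *	... use VFP registers ...
 *
 *	thread_kernel_disable_vfp(vfp_state);
 *
 * The value returned by thread_kernel_enable_vfp() records the foreign
 * interrupt mask that thread_kernel_disable_vfp() restores.
 */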
#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec,
					  false /*!force_save*/);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen during syscall processing, or during
		 * abort processing while a syscall is being processed.
		 */
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp,
						  false /*!force_save*/);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct user_mode_ctx *uctx)
{
	struct thread_user_vfp_state *uvfp = &uctx->vfp;
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

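/*
 * get_spsr() builds the SPSR used when entering user mode: user mode/EL0
 * with the current exception mask, and with the T bit set when a 32-bit
 * entry point is a Thumb address. Returns false if the requested mode isn't
 * supported (a 64-bit entry on a 32-bit core).
 */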
#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_cpsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 uint32_t spsr,
			 struct thread_pauth_keys *keys __maybe_unused)
{
	/*
	 * First clear all registers to avoid leaking information from
	 * other TAs or even the Core itself.
	 */
	*regs = (struct thread_ctx_regs){ };
#ifdef ARM32
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
	regs->usr_sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
#endif
#ifdef ARM64
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
	regs->pc = entry_func;
	regs->cpsr = spsr;
	regs->x[13] = user_sp;	/* Used when running TA in AArch32 */
	regs->sp = user_sp;	/* Used when running TA in AArch64 */
#ifdef CFG_TA_PAUTH
	assert(keys);
	regs->apiakey_hi = keys->apia_hi;
	regs->apiakey_lo = keys->apia_lo;
#endif
	/* Set frame pointer (user stack can't be unwound past this point) */
	regs->x[29] = 0;
#endif
}

static struct thread_pauth_keys *thread_get_pauth_keys(void)
{
#if defined(CFG_TA_PAUTH)
	struct ts_session *s = ts_get_current_session();

	if (is_user_ta_ctx(s->ctx)) {
		struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);

		return &utc->uctx.keys;
	} else if (is_sp_ctx(s->ctx)) {
		struct sp_ctx *spc = to_sp_ctx(s->ctx);

		return &spc->uctx.keys;
	}

	panic("[abort] Only user TAs and SPs support PAUTH keys");
#else
	return NULL;
#endif
}

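/*
 * Enter user mode and return the exit code reported once the user mode
 * context comes back to kernel mode. All exceptions are masked while the
 * per-thread register area is used as temporary storage; if the SPSR for
 * the requested mode can't be built the call fails with *exit_status0 set
 * to 1 (panic).
 */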
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long user_sp, unsigned long entry_func,
				bool is_32bit, uint32_t *exit_status0,
				uint32_t *exit_status1)
{
	uint32_t spsr = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;
	struct thread_pauth_keys *keys = NULL;

	tee_ta_update_session_utime_resume();

	keys = thread_get_pauth_keys();

	/* Derive SPSR from current CPSR/PSTATE readout. */
	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	/*
	 * We're using the per thread location of saved context registers
	 * for temporary storage. Now that exceptions are masked they will
	 * not be used for anything else until they are eventually
	 * unmasked when user mode has been entered.
	 */
	regs = thread_get_ctx_regs();
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr, keys);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);
	return rc;
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram_rx;
	*sz = thread_user_kcode_size;
	*offset = thread_user_kcode_va - (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram_rw;
	*sz = sizeof(thread_user_kdata_page);
	*offset = (vaddr_t)thread_user_kdata_page -
		  (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
#ifdef ARM32
	regs->lr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = read_cpsr();
#endif
#ifdef ARM64
	regs->elr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = spsr_from_pstate();
	/*
	 * regs holds the value of the stack pointer before the SVC handler
	 * was called, so the addition matches the space reserved at the
	 * beginning of el0_sync_svc(). This prepares the stack for
	 * returning to thread_unwind_user_mode instead of doing a normal
	 * exception return.
	 */
	regs->sp_el0 = (uint64_t)(regs + 1);
#endif
}

static void gprof_set_status(struct ts_session *s __maybe_unused,
			     enum ts_gprof_status status __maybe_unused)
{
#ifdef CFG_TA_GPROF_SUPPORT
	if (s->ctx->ops->gprof_set_status)
		s->ctx->ops->gprof_set_status(status);
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();
	/*
	 * User mode service has just entered kernel mode, suspend gprof
	 * collection until we're about to switch back again.
	 */
	gprof_set_status(sess, TS_GPROF_SUSPEND);

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);
	if (sess->handle_scall(regs)) {
		/* We're about to switch back to user mode */
		gprof_set_status(sess, TS_GPROF_RESUME);
	} else {
		/* We're returning from __thread_enter_user_mode() */
		setup_unwind_user_mode(regs);
	}
}

#ifdef CFG_WITH_ARM_TRUSTED_FW
/*
 * These five functions are __weak to allow platforms to override them if
 * needed.
 */
unsigned long __weak thread_cpu_off_handler(unsigned long a0 __unused,
					    unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_off_handler);

unsigned long __weak thread_cpu_suspend_handler(unsigned long a0 __unused,
						unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_suspend_handler);

unsigned long __weak thread_cpu_resume_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_resume_handler);

unsigned long __weak thread_system_off_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_off_handler);

unsigned long __weak thread_system_reset_handler(unsigned long a0 __unused,
						 unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_reset_handler);
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

#ifdef CFG_CORE_WORKAROUND_ARM_NMFI
void __noreturn interrupt_main_handler(void)
{
	/*
	 * Note: this overrides the default implementation of this function
	 * so that, if another handler were defined, there would be a
	 * duplicate symbol error during linking.
	 */
	panic("Secure interrupt received but it is not supported");
}
#endif