// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/interrupt.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/user_access.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <smccc.h>
#include <sm/sm.h>
#include <trace.h>
#include <util.h>

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va __nex_bss;
long thread_user_kcode_offset __nex_bss;
static size_t thread_user_kcode_size __nex_bss;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset __nex_bss;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE,
		SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE)
#ifndef CFG_NS_VIRTUALIZATION
	__section(".nozi.kdata_page");
#else
	__section(".nex_nozi.kdata_page");
#endif
#endif

#ifdef ARM32
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);

	barrier();
	write_cpsr(cpsr);
	barrier();
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);

	barrier();
	write_daif(daif);
	barrier();
}
#endif /*ARM64*/

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}
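
/*
 * Illustrative usage (informational sketch only): callers typically save
 * and restore the exception mask around a critical section, as done
 * elsewhere in this file, e.g.:
 *
 *	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	... section that must not be preempted by foreign interrupts ...
 *	thread_unmask_exceptions(excep);
 */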

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint32_t)pc;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			     (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = a0;
	thread->regs.r1 = a1;
	thread->regs.r2 = a2;
	thread->regs.r3 = a3;
	thread->regs.r4 = a4;
	thread->regs.r5 = a5;
	thread->regs.r6 = a6;
	thread->regs.r7 = a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint64_t)pc;

	/*
	 * Stdcalls start in EL1 with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = a0;
	thread->regs.x[1] = a1;
	thread->regs.x[2] = a2;
	thread->regs.x[3] = a3;
	thread->regs.x[4] = a4;
	thread->regs.x[5] = a5;
	thread->regs.x[6] = a6;
	thread->regs.x[7] = a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/
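
/*
 * Call-flow sketch (informational): a standard SMC or FF-A call from the
 * normal world ends up in thread_alloc_and_run() (or
 * thread_sp_alloc_and_run() for a secure partition), which grabs a free
 * entry in threads[], initializes its registers via init_regs() so that
 * execution starts at the supplied entry point, and then jumps to it with
 * thread_resume(). The thread leaves the active state again either via
 * thread_state_free() when the call completes, or via
 * thread_state_suspend() when it blocks on an RPC or is preempted.
 */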
static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc, uint32_t flags)
{
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;
	size_t n = 0;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = flags;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);
#ifdef CFG_CORE_PAUTH
	/*
	 * Copy the APIA key into the registers to be restored with
	 * thread_resume().
	 */
	threads[n].regs.apiakey_hi = threads[n].keys.apia_hi;
	threads[n].regs.apiakey_lo = threads[n].keys.apia_lo;
#endif

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_smc_entry, 0);
}

#ifdef CFG_SECURE_PARTITION
void thread_sp_alloc_and_run(struct thread_smc_args *args __maybe_unused)
{
	__thread_alloc_and_run(args->a0, args->a1, args->a2, args->a3, args->a4,
			       args->a5, args->a6, args->a7,
			       spmc_sp_thread_entry, THREAD_FLAGS_FFA_ONLY);
}
#endif

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * r0-r3 when thread is resumed.
	 */
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update returned values from RPC, values will appear in
	 * x0-x3 when thread is resumed.
	 */
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}
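
/*
 * Informational sketch: thread_resume_from_rpc() is the counterpart of
 * thread_state_suspend() below. When a thread does an RPC to the normal
 * world or is preempted by a foreign interrupt it is suspended and its
 * thread id is handed to the normal world; the normal world later passes
 * that id back in a "return from RPC" call, which ends up here so the
 * suspended thread can continue where it left off.
 */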
void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * Return from RPC to request service of a foreign interrupt must not
	 * get parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

#ifdef ARM64
static uint64_t spsr_from_pstate(void)
{
	uint64_t spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);

	spsr |= read_daif();
	if (IS_ENABLED(CFG_PAN) && feat_pan_implemented() && read_pan())
		spsr |= SPSR_64_PAN;

	return spsr;
}

void __thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
{
	thread_rpc_spsr(rv, spsr_from_pstate());
}

vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();
	thread_unlock_global();
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	if (IS_ENABLED(CFG_SECURE_PARTITION)) {
		struct ts_session *ts_sess =
			TAILQ_FIRST(&threads[ct].tsd.sess_stack);

		spmc_sp_set_to_preempted(ts_sess);
	}

	l->curr_thread = THREAD_ID_INVALID;

	if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
		virt_unset_guest();

	thread_unlock_global();

	return ct;
}

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

static void __maybe_unused
set_core_local_kcode_offset(struct thread_core_local *cls, long offset)
{
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		cls[n].kcode_offset = offset;
}

static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

	set_core_local_kcode_offset(thread_core_local,
				    thread_user_kcode_offset);
#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	set_core_local_kcode_offset((void *)thread_user_kdata_page,
				    thread_user_kcode_offset);
	/*
	 * When transitioning to EL0, subtract this much from SP to make it
	 * point to this special kdata page instead. SP is restored by
	 * adding this much while transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	thread_init_canaries();

	init_user_kcode();
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

static uint32_t __maybe_unused get_midr_variant(uint32_t midr)
{
	return (midr >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK;
}

static uint32_t __maybe_unused get_midr_revision(uint32_t midr)
{
	return (midr >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK;
}

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
#ifdef ARM64
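/*
 * Informational note: SMCCC_VERSION encodes the version as
 * (major << 16) | minor, so the 0x10001 below corresponds to SMCCC v1.1,
 * the first version where SMCCC_ARCH_FEATURES (and hence probing for
 * SMCCC_ARCH_WORKAROUND_1) is defined. A negative return value means the
 * call itself isn't implemented.
 */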
static bool probe_workaround_available(uint32_t wa_id)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001)	/* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, wa_id, 0, 0);
	return r >= 0;
}

static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
{
	if (probe_workaround_available(SMCCC_ARCH_WORKAROUND_1)) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return (vaddr_t)thread_excp_vect_wa_spectre_v2;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t __maybe_unused select_vector_wa_spectre_v2(void)
{
	return (vaddr_t)thread_excp_vect_wa_spectre_v2;
}
#endif
#endif

#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
static vaddr_t select_vector_wa_spectre_bhb(uint8_t loop_count __maybe_unused)
{
	/*
	 * Spectre-BHB has only been analyzed for AArch64 so far. For
	 * AArch32 fall back to the Spectre-V2 workaround which is likely
	 * to work even if perhaps a bit more expensive than a more
	 * optimized workaround.
	 */
#ifdef ARM64
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	struct thread_core_local *cl = (void *)thread_user_kdata_page;

	cl[get_core_pos()].bhb_loop_count = loop_count;
#endif
	thread_get_core_local()->bhb_loop_count = loop_count;

	DMSG("Spectre-BHB CVE-2022-23960 workaround enabled with \"K\" = %u",
	     loop_count);

	return (vaddr_t)thread_excp_vect_wa_spectre_bhb;
#else
	return select_vector_wa_spectre_v2();
#endif
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();
	uint8_t vers = 0;

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;
	/*
	 * Variant rx, Revision py, for instance
	 * Variant 2 Revision 0 = r2p0 = 0x20
	 */
	vers = (get_midr_variant(midr) << 4) | get_midr_revision(midr);

	/*
	 * The Spectre-V2 (CVE-2017-5715) software workaround covers what's
	 * needed for Spectre-BHB (CVE-2022-23960) too. The workaround for
	 * Spectre-V2 is more expensive than the one for Spectre-BHB, so
	 * select the Spectre-BHB workaround where possible.
	 */
	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	/* Spectre-V2 */
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	/* Spectre-V2 */
	case CORTEX_A57_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector_wa_spectre_v2();
#ifdef ARM32
	/* Spectre-V2 */
	case CORTEX_A15_PART_NUM:
		return (vaddr_t)thread_excp_vect_wa_a15_spectre_v2;
#endif
	/*
	 * Spectre-V2 for vers < r1p0
	 * Spectre-BHB for vers >= r1p0
	 */
	case CORTEX_A72_PART_NUM:
		if (vers < 0x10)
			return select_vector_wa_spectre_v2();
		return select_vector_wa_spectre_bhb(8);

	/*
	 * Use the safer but more expensive Spectre-V2 workaround for CPUs
	 * where the best mitigation sequence is still being researched.
	 */
	case CORTEX_A65_PART_NUM:
	case CORTEX_A65AE_PART_NUM:
	case NEOVERSE_E1_PART_NUM:
		return select_vector_wa_spectre_v2();

	/* Spectre-BHB */
	case CORTEX_A76_PART_NUM:
	case CORTEX_A76AE_PART_NUM:
	case CORTEX_A77_PART_NUM:
		return select_vector_wa_spectre_bhb(24);
	case CORTEX_A78_PART_NUM:
	case CORTEX_A78AE_PART_NUM:
	case CORTEX_A78C_PART_NUM:
	case CORTEX_A710_PART_NUM:
	case CORTEX_X1_PART_NUM:
	case CORTEX_X2_PART_NUM:
		return select_vector_wa_spectre_bhb(32);
	case NEOVERSE_N1_PART_NUM:
		return select_vector_wa_spectre_bhb(24);
	case NEOVERSE_N2_PART_NUM:
	case NEOVERSE_V1_PART_NUM:
		return select_vector_wa_spectre_bhb(32);

	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

void thread_init_per_cpu(void)
{
#ifdef ARM32
	struct thread_core_local *l = thread_get_core_local();

#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(l->tmp_stack_va_end + STACK_TMP_OFFS);
#endif
	thread_set_irq_sp(l->tmp_stack_va_end);
	thread_set_fiq_sp(l->tmp_stack_va_end);
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
#endif

	thread_init_vbar(get_excp_vect());

#ifdef CFG_FTRACE_SUPPORT
	/*
	 * Enable accesses to frequency register and physical counter
	 * register in EL0/PL0 required for timestamping during
	 * function tracing.
	 */
	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
#endif
}

#ifdef CFG_WITH_VFP
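/*
 * Illustrative usage (sketch): kernel code that needs the FPU/SIMD
 * registers brackets the access with the pair below, e.g.:
 *
 *	uint32_t vfp_state = thread_kernel_enable_vfp();
 *	... use VFP/SIMD registers ...
 *	thread_kernel_disable_vfp(vfp_state);
 *
 * thread_kernel_enable_vfp() returns with foreign interrupts masked and
 * thread_kernel_disable_vfp() restores the previous mask state.
 */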
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec,
					  false /*!force_save*/);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp,
						  false /*!force_save*/);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct user_mode_ctx *uctx)
{
	struct thread_user_vfp_state *uvfp = &uctx->vfp;
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_cpsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 uint32_t spsr,
			 struct thread_pauth_keys *keys __maybe_unused)
{
	/*
	 * First clear all registers to avoid leaking information from
	 * other TAs or even the Core itself.
	 */
	*regs = (struct thread_ctx_regs){ };
#ifdef ARM32
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
	regs->usr_sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
#endif
#ifdef ARM64
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
	regs->sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
	regs->x[13] = user_sp;	/* Used when running TA in AArch32 */
	regs->sp = user_sp;	/* Used when running TA in AArch64 */
#ifdef CFG_TA_PAUTH
	assert(keys);
	regs->apiakey_hi = keys->apia_hi;
	regs->apiakey_lo = keys->apia_lo;
#endif
	/* Set frame pointer (user stack can't be unwound past this point) */
	regs->x[29] = 0;
#endif
}

static struct thread_pauth_keys *thread_get_pauth_keys(void)
{
#if defined(CFG_TA_PAUTH)
	struct ts_session *s = ts_get_current_session();
	/* Only user TAs support the PAUTH keys */
	struct user_ta_ctx *utc = to_user_ta_ctx(s->ctx);

	return &utc->uctx.keys;
#else
	return NULL;
#endif
}
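
/*
 * Informational sketch: thread_enter_user_mode() is what finally drops to
 * user mode. A hypothetical caller looks roughly like:
 *
 *	uint32_t es0 = 0, es1 = 0;
 *	uint32_t ret = thread_enter_user_mode(arg0, arg1, arg2, arg3,
 *					      usr_stack, entry, is_32bit,
 *					      &es0, &es1);
 *
 * The call returns once the user mode code exits through
 * thread_unwind_user_mode() (for instance from the syscall handler) or is
 * aborted, with exit_status0/exit_status1 describing the exit reason.
 */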
uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;
	struct thread_pauth_keys *keys = NULL;

	tee_ta_update_session_utime_resume();

	keys = thread_get_pauth_keys();

	/* Derive SPSR from current CPSR/PSTATE readout. */
	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	/*
	 * We're using the per thread location of saved context registers
	 * for temporary storage. Now that exceptions are masked they will
	 * not be used for anything else until they are eventually
	 * unmasked when user mode has been entered.
	 */
	regs = thread_get_ctx_regs();
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr, keys);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);
	return rc;
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram_rx;
	*sz = thread_user_kcode_size;
	*offset = thread_user_kcode_va - (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram_rw;
	*sz = sizeof(thread_user_kdata_page);
	*offset = (vaddr_t)thread_user_kdata_page -
		  (vaddr_t)mobj_get_va(*mobj, 0, *sz);
}
#endif

static void setup_unwind_user_mode(struct thread_scall_regs *regs)
{
#ifdef ARM32
	regs->lr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = read_cpsr();
#endif
#ifdef ARM64
	regs->elr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = spsr_from_pstate();
	/*
	 * Regs is the value of the stack pointer before calling the SVC
	 * handler. The addition matches the space reserved at the
	 * beginning of el0_sync_svc(). This prepares the stack for
	 * returning to thread_unwind_user_mode instead of doing a normal
	 * exception return.
	 */
	regs->sp_el0 = (uint64_t)(regs + 1);
#endif
}

static void gprof_set_status(struct ts_session *s __maybe_unused,
			     enum ts_gprof_status status __maybe_unused)
{
#ifdef CFG_TA_GPROF_SUPPORT
	if (s->ctx->ops->gprof_set_status)
		s->ctx->ops->gprof_set_status(status);
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak thread_scall_handler(struct thread_scall_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();
	/*
	 * User mode service has just entered kernel mode, suspend gprof
	 * collection until we're about to switch back again.
	 */
	gprof_set_status(sess, TS_GPROF_SUSPEND);

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_scall);
	if (sess->handle_scall(regs)) {
		/* We're about to switch back to user mode */
		gprof_set_status(sess, TS_GPROF_RESUME);
	} else {
		/* We're returning from __thread_enter_user_mode() */
		setup_unwind_user_mode(regs);
	}
}

#ifdef CFG_WITH_ARM_TRUSTED_FW
/*
 * These five functions are __weak to allow platforms to override them if
 * needed.
 */
unsigned long __weak thread_cpu_off_handler(unsigned long a0 __unused,
					    unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_off_handler);

unsigned long __weak thread_cpu_suspend_handler(unsigned long a0 __unused,
						unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_suspend_handler);

unsigned long __weak thread_cpu_resume_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_resume_handler);

unsigned long __weak thread_system_off_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_off_handler);

unsigned long __weak thread_system_reset_handler(unsigned long a0 __unused,
						 unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_reset_handler);
#endif /*CFG_WITH_ARM_TRUSTED_FW*/

#ifdef CFG_CORE_WORKAROUND_ARM_NMFI
void __noreturn interrupt_main_handler(void)
{
	/*
	 * Note: this overrides the default implementation of this function
	 * so that, if another handler were defined, there would be a
	 * duplicate symbol error during linking.
	 */
	panic("Secure interrupt received but it is not supported");
}
#endif