// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_mmu.h>
#include <mm/tee_pager.h>
#include <smccc.h>
#include <sm/sm.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif

#endif /*ARM32*/

#ifdef ARM64
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE, STACK_ALIGNMENT) / \
		sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

#define STACK_SIZE(stack) (sizeof(stack) - STACK_CANARY_SIZE / 2)

#define GET_STACK(stack) \
	((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, static);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif
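
/*
 * Note: with CFG_WITH_STACK_CANARIES each stack declared above is an array
 * of uint32_t laid out roughly as follows (illustrative sketch only):
 *
 *	name[n][0]		start canary, 0xdededede, lowest address
 *	name[n][1..last - 1]	usable stack, grows downwards
 *	name[n][last]		end canary, 0xabababab, highest address
 *
 * GET_STACK() returns an initial stack pointer STACK_CANARY_SIZE / 2 bytes
 * below the end of the array, so the end canary stays above the stack while
 * an overflow (running off the low end) tramples the start canary. See
 * init_canaries() and thread_check_canaries() below.
 */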

const void *stack_tmp_export = (uint8_t *)stack_tmp + sizeof(stack_tmp[0]) -
			       (STACK_TMP_OFFS + STACK_CANARY_SIZE / 2);
const uint32_t stack_tmp_stride = sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU), hence it is kept in pager
 * sections.
 */
KEEP_PAGER(stack_tmp_export);
KEEP_PAGER(stack_tmp_stride);

thread_smc_handler_t thread_std_smc_handler_ptr __nex_bss;
thread_smc_handler_t thread_fast_smc_handler_ptr __nex_bss;
thread_nintr_handler_t thread_nintr_handler_ptr __nex_bss;
thread_pm_handler_t thread_cpu_on_handler_ptr __nex_bss;
thread_pm_handler_t thread_cpu_off_handler_ptr __nex_bss;
thread_pm_handler_t thread_cpu_suspend_handler_ptr __nex_bss;
thread_pm_handler_t thread_cpu_resume_handler_ptr __nex_bss;
thread_pm_handler_t thread_system_off_handler_ptr __nex_bss;
thread_pm_handler_t thread_system_reset_handler_ptr __nex_bss;

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va __nex_bss;
long thread_user_kcode_offset __nex_bss;
static size_t thread_user_kcode_size __nex_bss;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset __nex_bss;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(thread_core_local), SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE)
#ifndef CFG_VIRTUALIZATION
	__section(".nozi.kdata_page");
#else
	__section(".nex_nozi.kdata_page");
#endif
#endif

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
		DMSG("#Stack canaries for %s[%zu] with top at %p",	\
		     #name, n, (void *)(end_canary - 1));		\
		DMSG("watch *%p", (void *)end_canary);			\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n) \
	do { \
		EMSG_RAW("Dead canary at %s of '%s[%zu]'", #loc, #stack, n); \
		panic(); \
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		if (GET_START_CANARY(stack_tmp, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n);
		if (GET_END_CANARY(stack_tmp, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		if (GET_START_CANARY(stack_abt, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n);
		if (GET_END_CANARY(stack_abt, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n);
	}
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		if (GET_START_CANARY(stack_thread, n) != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n);
		if (GET_END_CANARY(stack_thread, n) != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}
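
/*
 * Note: thread_global_lock, taken with the two helpers above, serializes
 * the threads[] state transitions performed in thread_alloc_and_run(),
 * thread_resume_from_rpc(), thread_state_suspend() and thread_state_free()
 * further down. It is essentially only held for the few statements that
 * move a thread between the FREE, ACTIVE and SUSPENDED states.
 */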

#ifdef ARM32
uint32_t thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);
	write_cpsr(cpsr);
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);
	write_daif(daif);
}
#endif /*ARM64*/

uint32_t thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}

struct thread_core_local *thread_get_core_local(void)
{
	uint32_t cpu_id = get_core_pos();

	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(cpu_id < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[cpu_id];
}
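
/*
 * Note: thread_get_core_local() and the exception mask helpers above are
 * typically combined into the following idiom (a minimal sketch of the
 * pattern used by, for instance, thread_get_id_may_fail() below):
 *
 *	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	struct thread_core_local *l = thread_get_core_local();
 *
 *	... use l, the thread cannot be migrated to another core here ...
 *
 *	thread_unmask_exceptions(excep);
 */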

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}

#ifdef ARM32
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3)
{
	thread->regs.pc = (uint32_t)thread_std_smc_entry;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous abort and unmasked native interrupts.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			     (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the arguments appear
	 * in r0-r7 when the thread is started.
	 */
	thread->regs.r0 = a0;
	thread->regs.r1 = a1;
	thread->regs.r2 = a2;
	thread->regs.r3 = a3;
	thread->regs.r4 = 0;
	thread->regs.r5 = 0;
	thread->regs.r6 = 0;
	thread->regs.r7 = 0;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3)
{
	thread->regs.pc = (uint64_t)thread_std_smc_entry;

	/*
	 * Stdcalls start at EL1 with masked foreign interrupts, masked
	 * asynchronous abort and unmasked native interrupts.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the arguments appear
	 * in x0-x7 when the thread is started.
	 */
	thread->regs.x[0] = a0;
	thread->regs.x[1] = a1;
	thread->regs.x[2] = a2;
	thread->regs.x[3] = a3;
	thread->regs.x[4] = 0;
	thread->regs.x[5] = 0;
	thread->regs.x[6] = 0;
	thread->regs.x[7] = 0;

	/* Set up frame pointer as per the Aarch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	thread_init_threads();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = -1;
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3);

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}
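
/*
 * Note: the helpers below and thread_resume_from_rpc() handle resumption of
 * a suspended thread after an RPC: the normal world passes the thread ID
 * back in a3, and the remaining arguments are copied into r0-r5/x0-x5 only
 * when THREAD_FLAGS_COPY_ARGS_ON_RETURN is set, so a resume that merely
 * follows a foreign interrupt cannot inject parameters into the thread.
 */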

#ifdef ARM32
static void copy_a0_to_a5(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3, uint32_t a4,
			  uint32_t a5)
{
	/*
	 * Update the values returned from RPC; the values will appear in
	 * r0-r3 when the thread is resumed.
	 */
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
	regs->r4 = a4;
	regs->r5 = a5;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a5(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3, uint32_t a4,
			  uint32_t a5)
{
	/*
	 * Update the values returned from RPC; the values will appear in
	 * x0-x3 when the thread is resumed.
	 */
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
	regs->x[4] = a4;
	regs->x[5] = a5;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

void thread_resume_from_rpc(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			    uint32_t a4, uint32_t a5)
{
	size_t n = a3; /* thread id */
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == -1);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * Return from RPC to request service of a foreign interrupt must not
	 * get parameters from non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a5(&threads[n].regs, a0, a1, a2, a3, a4, a5);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == -1)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif
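
/*
 * Note: the ARM32 variant above can simply check for SVC mode since aborts
 * and interrupts are handled in dedicated banked modes. AArch64 has no such
 * banked modes, so the ARM64 variant below instead relies on the
 * THREAD_CLF_* bits in l->flags: any bit set means this core is currently
 * inside some exception handler.
 */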

#ifdef ARM64
bool thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/* If any bit in l->flags is set we're handling some exception. */
	ret = !l->flags;
	thread_unmask_exceptions(exceptions);

	return ret;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = -1;

#ifdef CFG_VIRTUALIZATION
	virt_unset_guest();
#endif
	thread_unlock_global();
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != -1);

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = -1;

#ifdef CFG_VIRTUALIZATION
	virt_unset_guest();
#endif

	thread_unlock_global();

	return ct;
}
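
/*
 * Note: set_tmp_stack() and set_abt_stack() below record the per-core
 * temporary and abort stack tops in thread_core_local. On ARM32 they also
 * program the banked stack pointers: IRQ and FIQ get the temporary stack,
 * while the ABT and UND stack pointers are pointed at the thread_core_local
 * structure itself (the actual abort stack top is then presumably picked up
 * from abt_stack_va_end by the exception entry code). On ARM64 the
 * temporary stack is already in use at this point, so only the bookkeeping
 * is needed.
 */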

#ifdef ARM32
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->tmp_stack_va_end = sp;
	thread_set_irq_sp(sp);
	thread_set_fiq_sp(sp);
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
	thread_set_abt_sp((vaddr_t)l);
	thread_set_und_sp((vaddr_t)l);
}
#endif /*ARM32*/

#ifdef ARM64
static void set_tmp_stack(struct thread_core_local *l, vaddr_t sp)
{
	/*
	 * We're already using the tmp stack when this function is called
	 * so there's no need to assign it to any stack pointer. However,
	 * we'll need to restore it at different times so store it here.
	 */
	l->tmp_stack_va_end = sp;
}

static void set_abt_stack(struct thread_core_local *l, vaddr_t sp)
{
	l->abt_stack_va_end = sp;
}
#endif /*ARM64*/

bool thread_init_stack(uint32_t thread_id, vaddr_t sp)
{
	if (thread_id >= CFG_NUM_THREADS)
		return false;
	threads[thread_id].stack_va_end = sp;
	return true;
}

int thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

int thread_get_id(void)
{
	int ct = thread_get_id_may_fail();

	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static void init_handlers(const struct thread_handlers *handlers)
{
	thread_std_smc_handler_ptr = handlers->std_smc;
	thread_fast_smc_handler_ptr = handlers->fast_smc;
	thread_nintr_handler_ptr = handlers->nintr;
	thread_cpu_on_handler_ptr = handlers->cpu_on;
	thread_cpu_off_handler_ptr = handlers->cpu_off;
	thread_cpu_suspend_handler_ptr = handlers->cpu_suspend;
	thread_cpu_resume_handler_ptr = handlers->cpu_resume;
	thread_system_off_handler_ptr = handlers->system_off;
	thread_system_reset_handler_ptr = handlers->system_reset;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the area to the pager */
		tee_pager_add_core_area(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					PAGER_AREA_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/
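
/*
 * Note on the pager variant of init_thread_stacks() above: each thread
 * stack is allocated with one extra SMALL_PAGE_SIZE "protection gap" below
 * it. Only the upper STACK_THREAD_SIZE part is registered with the pager as
 * a locked area, so running off the bottom of a stack hits an unmapped page
 * and aborts instead of silently corrupting whatever sits below.
 */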

static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	/*
	 * When transitioning to EL0, subtract this much from SP to make it
	 * point into this special kdata page instead. SP is restored by
	 * adding the same amount back when transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_threads(void)
{
	size_t n;

	init_thread_stacks();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		thread_core_local[n].curr_thread = -1;
}

void thread_init_primary(const struct thread_handlers *handlers)
{
	init_handlers(handlers);

	/* Initialize canaries around the stacks */
	init_canaries();

	init_user_kcode();
}

static void init_sec_mon(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK(stack_tmp[pos]));
#endif
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

#ifdef ARM64
static bool probe_workaround_available(void)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001)	/* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0);
	return r >= 0;
}

static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	if (probe_workaround_available()) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return a;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	return a;
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;

	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	case CORTEX_A57_PART_NUM:
	case CORTEX_A72_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround);
#ifdef ARM32
	case CORTEX_A15_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround_a15);
#endif
	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}
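
/*
 * Note: get_excp_vect() above implements the CVE-2017-5715 ("Spectre v2")
 * mitigation for the exception vector. On the affected Cortex-A parts a
 * hardened vector is selected: ARM32 always uses it, while ARM64 only uses
 * it when the firmware advertises SMCCC_ARCH_WORKAROUND_1 (which the
 * hardened vector presumably invokes); otherwise the standard vector is
 * kept and the mitigation is left to up-to-date secure firmware (ARM-TF).
 */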

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon(pos);

	set_tmp_stack(l, GET_STACK(stack_tmp[pos]) - STACK_TMP_OFFS);
	set_abt_stack(l, GET_STACK(stack_abt[pos]));

	thread_init_vbar(get_excp_vect());

#ifdef CFG_TA_FTRACE_SUPPORT
	/*
	 * Enable accesses to frequency register and physical counter
	 * register in EL0/PL0 required for timestamping during
	 * function tracing.
	 */
	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
#endif
}

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs *thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != -1);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (enable) {
		threads[l->curr_thread].flags |=
			THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
			~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != -1);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}
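
/*
 * Note on the VFP handling below: each thread tracks three VFP contexts in
 * vfp_state - ns (the normal world state, lazily saved on entry via
 * thread_lazy_save_ns_vfp() and restored when returning), sec (secure
 * kernel-mode use, see thread_kernel_*_vfp()) and uvfp (the current TA's
 * user-mode state, see thread_user_*_vfp()). "Lazy" means that the *_init()
 * call essentially just disables VFP access; the registers are only written
 * back by vfp_lazy_save_state_final() the first time another context
 * actually needs the unit.
 */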

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec,
					  false /*!force_save*/);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp,
						  false /*!force_save*/);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_spsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif
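
/*
 * Note: the get_spsr() variants above build the mode bits used when
 * entering a TA: a 32-bit TA runs in AArch32 user mode with the Thumb bit
 * taken from bit 0 of the entry address, a 64-bit TA runs at EL0, and the
 * current interrupt mask bits are carried over. thread_enter_user_mode()
 * below reports a panic (exit_status0 = 1) if the combination is not
 * supported, e.g. a 64-bit TA on an ARM32-only core.
 */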

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr;

	tee_ta_update_session_utime_resume();

	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}
	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
					spsr, exit_status0, exit_status1);
}

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram;
	*offset = thread_user_kcode_va - TEE_RAM_START;
	*sz = thread_user_kcode_size;
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram;
	*offset = (vaddr_t)thread_user_kdata_page - TEE_RAM_START;
	*sz = sizeof(thread_user_kdata_page);
}
#endif
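
/*
 * Note: the two helpers above describe the small parts of the TEE core that
 * must remain mapped while executing at EL0 when CFG_CORE_UNMAP_CORE_AT_EL0
 * is enabled: the exception vector trampoline (kcode) and, with the ARM64
 * Spectre workaround, one kdata page sized to hold thread_core_local. The
 * user-mapping code is expected to query them when building each user
 * address space.
 */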