// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <platform_config.h>

#include <arm.h>
#include <assert.h>
#include <config.h>
#include <io.h>
#include <keep.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/linker.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/spmc_sp_handler.h>
#include <kernel/tee_ta_manager.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <kernel/user_mode_ctx_struct.h>
#include <kernel/virtualization.h>
#include <mm/core_memprot.h>
#include <mm/mobj.h>
#include <mm/tee_mm.h>
#include <mm/tee_pager.h>
#include <mm/vm.h>
#include <smccc.h>
#include <sm/sm.h>
#include <trace.h>
#include <util.h>

#include "thread_private.h"

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;

/*
 * Stacks
 *
 * [Lower addresses on the left]
 *
 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
 * ^                     ^                   ^                ^
 * stack_xxx[n]          "hard" top          "soft" top       bottom
 */

#ifdef CFG_WITH_ARM_TRUSTED_FW
#define STACK_TMP_OFFS		0
#else
#define STACK_TMP_OFFS		SM_STACK_TMP_RESERVE_SIZE
#endif

#ifdef ARM32
#ifdef CFG_CORE_SANITIZE_KADDRESS
#define STACK_TMP_SIZE		(3072 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(__clang__) || \
	!defined(CFG_CRYPTO_WITH_CE)
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		2048
#endif

#endif /*ARM32*/

#ifdef ARM64
#if defined(__clang__) && !defined(__OPTIMIZE_SIZE__)
#define STACK_TMP_SIZE		(4096 + STACK_TMP_OFFS)
#else
#define STACK_TMP_SIZE		(2048 + STACK_TMP_OFFS)
#endif
#define STACK_THREAD_SIZE	8192

#if TRACE_LEVEL > 0
#define STACK_ABT_SIZE		3072
#else
#define STACK_ABT_SIZE		1024
#endif
#endif /*ARM64*/

#ifdef CFG_WITH_STACK_CANARIES
#ifdef ARM32
#define STACK_CANARY_SIZE	(4 * sizeof(uint32_t))
#endif
#ifdef ARM64
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))
#endif
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
/*
 * Extra space added to each stack in order to reliably detect and dump
 * stack overflows. Should cover the maximum expected overflow size caused
 * by any C function (say, 512 bytes; no function should have that many
 * local variables), plus the maximum stack space needed by
 * __cyg_profile_func_exit(): about 1 KB, a large part of which is used to
 * print the call stack. Total: 1.5 KB.
 */
#define STACK_CHECK_EXTRA	1536
#else
#define STACK_CHECK_EXTRA	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) ((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE,
	      STACK_TMP_SIZE + CFG_STACK_TMP_EXTRA, static);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS,
	      STACK_THREAD_SIZE + CFG_STACK_THREAD_EXTRA, static);
#endif

#define GET_STACK_TOP_HARD(stack, n) \
	((vaddr_t)&(stack)[n] + STACK_CANARY_SIZE / 2)
#define GET_STACK_TOP_SOFT(stack, n) \
	(GET_STACK_TOP_HARD(stack, n) + STACK_CHECK_EXTRA)
#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
				    STACK_CANARY_SIZE / 2)
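/*
 * Illustrative sketch (not used by the code): for a hypothetical 2048-byte
 * stack on ARM64 (STACK_CANARY_SIZE == 32) with STACK_CHECK_EXTRA == 0 and
 * an already-aligned size, sizeof(stack[n]) == 2048 + 32 == 2080 and the
 * macros above yield:
 *
 *	GET_STACK_TOP_HARD(stack, n) == (vaddr_t)&stack[n] + 16
 *	GET_STACK_TOP_SOFT(stack, n) == (vaddr_t)&stack[n] + 16
 *	GET_STACK_BOTTOM(stack, n)   == (vaddr_t)&stack[n] + 2080 - 16
 *
 * which matches the layout diagram above: half a canary at each end and
 * the usable stack in between.
 */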

const void *stack_tmp_export __section(".identity_map.stack_tmp_export") =
	(void *)(GET_STACK_BOTTOM(stack_tmp, 0) - STACK_TMP_OFFS);
const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU); hence it is kept in pager
 * sections.
 */
DECLARE_KEEP_PAGER(stack_tmp_export);
DECLARE_KEEP_PAGER(stack_tmp_stride);

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
static vaddr_t thread_user_kcode_va __nex_bss;
long thread_user_kcode_offset __nex_bss;
static size_t thread_user_kcode_size __nex_bss;
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
long thread_user_kdata_sp_offset __nex_bss;
static uint8_t thread_user_kdata_page[
	ROUNDUP(sizeof(thread_core_local), SMALL_PAGE_SIZE)]
	__aligned(SMALL_PAGE_SIZE)
#ifndef CFG_VIRTUALIZATION
	__section(".nozi.kdata_page");
#else
	__section(".nex_nozi.kdata_page");
#endif
#endif

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

static void init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n, addr) \
	do { \
		EMSG_RAW("Dead canary at %s of '%s[%zu]' (%p)", #loc, #stack, \
			 n, (void *)addr); \
		panic(); \
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	uint32_t *canary = NULL;
	size_t n = 0;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		canary = &GET_START_CANARY(stack_tmp, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n, canary);
		canary = &GET_END_CANARY(stack_tmp, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n, canary);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		canary = &GET_START_CANARY(stack_abt, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n, canary);
		canary = &GET_END_CANARY(stack_abt, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n, canary);
	}
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		canary = &GET_START_CANARY(stack_thread, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n, canary);
		canary = &GET_END_CANARY(stack_thread, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n, canary);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}
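
/*
 * The global thread lock below serializes updates to the threads[] state
 * machine. Illustrative pattern, as used by the state-transition helpers
 * in this file:
 *
 *	thread_lock_global();
 *	... inspect or update threads[n].state ...
 *	thread_unlock_global();
 */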

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

#ifdef ARM32
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t cpsr = read_cpsr();

	return (cpsr >> CPSR_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t cpsr = read_cpsr();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	cpsr &= ~(THREAD_EXCP_ALL << CPSR_F_SHIFT);
	cpsr |= ((exceptions & THREAD_EXCP_ALL) << CPSR_F_SHIFT);

	barrier();
	write_cpsr(cpsr);
	barrier();
}
#endif /*ARM32*/

#ifdef ARM64
uint32_t __nostackcheck thread_get_exceptions(void)
{
	uint32_t daif = read_daif();

	return (daif >> DAIF_F_SHIFT) & THREAD_EXCP_ALL;
}

void __nostackcheck thread_set_exceptions(uint32_t exceptions)
{
	uint32_t daif = read_daif();

	/* Foreign interrupts must not be unmasked while holding a spinlock */
	if (!(exceptions & THREAD_EXCP_FOREIGN_INTR))
		assert_have_no_spinlock();

	daif &= ~(THREAD_EXCP_ALL << DAIF_F_SHIFT);
	daif |= ((exceptions & THREAD_EXCP_ALL) << DAIF_F_SHIFT);

	barrier();
	write_daif(daif);
	barrier();
}
#endif /*ARM64*/

uint32_t __nostackcheck thread_mask_exceptions(uint32_t exceptions)
{
	uint32_t state = thread_get_exceptions();

	thread_set_exceptions(state | (exceptions & THREAD_EXCP_ALL));
	return state;
}

void __nostackcheck thread_unmask_exceptions(uint32_t state)
{
	thread_set_exceptions(state & THREAD_EXCP_ALL);
}
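
/*
 * Illustrative sketch of the save/restore idiom used throughout this file
 * whenever per-core state is accessed:
 *
 *	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *
 *	... access per-core or per-thread state ...
 *	thread_unmask_exceptions(excep);
 *
 * thread_mask_exceptions() returns the previous mask state, so nesting
 * works without tracking whether foreign interrupts were already masked.
 */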

static struct thread_core_local * __nostackcheck
get_core_local(unsigned int pos)
{
	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(pos < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[pos];
}

struct thread_core_local * __nostackcheck thread_get_core_local(void)
{
	unsigned int pos = get_core_pos();

	return get_core_local(pos);
}

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
static void print_stack_limits(void)
{
	size_t n = 0;
	vaddr_t __maybe_unused start = 0;
	vaddr_t __maybe_unused end = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		start = GET_STACK_TOP_SOFT(stack_tmp, n);
		end = GET_STACK_BOTTOM(stack_tmp, n);
		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		start = GET_STACK_TOP_SOFT(stack_abt, n);
		end = GET_STACK_BOTTOM(stack_abt, n);
		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		end = threads[n].stack_va_end;
		start = end - STACK_THREAD_SIZE;
		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
}

static void check_stack_limits(void)
{
	vaddr_t stack_start = 0;
	vaddr_t stack_end = 0;
	/* Any value in the current stack frame will do */
	vaddr_t current_sp = (vaddr_t)&stack_start;

	if (!get_stack_soft_limits(&stack_start, &stack_end))
		panic("Unknown stack limits");
	if (current_sp < stack_start || current_sp > stack_end) {
		DMSG("Stack pointer out of range (0x%" PRIxVA ")", current_sp);
		print_stack_limits();
		panic();
	}
}

static bool * __nostackcheck get_stackcheck_recursion_flag(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool *p = NULL;

	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
		p = &l->stackcheck_recursion;
	else if (!l->flags)
		p = &threads[ct].tsd.stackcheck_recursion;

	thread_unmask_exceptions(exceptions);
	return p;
}

void __cyg_profile_func_enter(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
					     void *call_site __unused)
{
	bool *p = get_stackcheck_recursion_flag();

	assert(p);
	if (*p)
		return;
	*p = true;
	check_stack_limits();
	*p = false;
}

void __cyg_profile_func_exit(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
					    void *call_site __unused)
{
}
#else
static void print_stack_limits(void)
{
}
#endif

static void thread_lazy_save_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();

	thr->vfp_state.ns_saved = false;
	vfp_lazy_save_state_init(&thr->vfp_state.ns);
#endif /*CFG_WITH_VFP*/
}

static void thread_lazy_restore_ns_vfp(void)
{
#ifdef CFG_WITH_VFP
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);

	if (tuv && tuv->lazy_saved && !tuv->saved) {
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
	thr->vfp_state.ns_saved = false;
#endif /*CFG_WITH_VFP*/
}
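
/*
 * Note on pairing (summary of the call sites in this file):
 * thread_lazy_save_ns_vfp() is called when a thread starts or resumes
 * (__thread_alloc_and_run(), thread_resume_from_rpc()) and
 * thread_lazy_restore_ns_vfp() when it frees or suspends
 * (thread_state_free(), thread_state_suspend()), so the non-secure VFP
 * state is only saved for real if secure world actually touches VFP.
 */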

#ifdef ARM32
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint32_t)pc;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = read_cpsr() & ARM32_CPSR_E;
	thread->regs.cpsr |= CPSR_MODE_SVC | CPSR_A |
			     (THREAD_EXCP_FOREIGN_INTR << ARM32_CPSR_F_SHIFT);
	/* Enable thumb mode if it's a thumb instruction */
	if (thread->regs.pc & 1)
		thread->regs.cpsr |= CPSR_T;
	/* Reinitialize stack pointer */
	thread->regs.svc_sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in r0-r7 when thread is started.
	 */
	thread->regs.r0 = a0;
	thread->regs.r1 = a1;
	thread->regs.r2 = a2;
	thread->regs.r3 = a3;
	thread->regs.r4 = a4;
	thread->regs.r5 = a5;
	thread->regs.r6 = a6;
	thread->regs.r7 = a7;
}
#endif /*ARM32*/

#ifdef ARM64
static void init_regs(struct thread_ctx *thread, uint32_t a0, uint32_t a1,
		      uint32_t a2, uint32_t a3, uint32_t a4, uint32_t a5,
		      uint32_t a6, uint32_t a7, void *pc)
{
	thread->regs.pc = (uint64_t)pc;

	/*
	 * Stdcalls start in SVC mode with masked foreign interrupts, masked
	 * asynchronous aborts and unmasked native interrupts.
	 */
	thread->regs.cpsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0,
				    THREAD_EXCP_FOREIGN_INTR | DAIFBIT_ABT);
	/* Reinitialize stack pointer */
	thread->regs.sp = thread->stack_va_end;

	/*
	 * Copy arguments into context. This will make the
	 * arguments appear in x0-x7 when thread is started.
	 */
	thread->regs.x[0] = a0;
	thread->regs.x[1] = a1;
	thread->regs.x[2] = a2;
	thread->regs.x[3] = a3;
	thread->regs.x[4] = a4;
	thread->regs.x[5] = a5;
	thread->regs.x[6] = a6;
	thread->regs.x[7] = a7;

	/* Set up frame pointer as per the AArch64 AAPCS */
	thread->regs.x[29] = 0;
}
#endif /*ARM64*/

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	thread_init_threads();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void __nostackcheck thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = THREAD_ID_INVALID;
}

static void __thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2,
				   uint32_t a3, uint32_t a4, uint32_t a5,
				   uint32_t a6, uint32_t a7,
				   void *pc)
{
	size_t n;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state == THREAD_STATE_FREE) {
			threads[n].state = THREAD_STATE_ACTIVE;
			found_thread = true;
			break;
		}
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	threads[n].flags = 0;
	init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);

	thread_lazy_save_ns_vfp();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}

void thread_alloc_and_run(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
			  uint32_t a4, uint32_t a5)
{
	__thread_alloc_and_run(a0, a1, a2, a3, a4, a5, 0, 0,
			       thread_std_smc_entry);
}

#ifdef CFG_SECURE_PARTITION
void thread_sp_alloc_and_run(struct thread_smc_args *args __maybe_unused)
{
	__thread_alloc_and_run(args->a0, args->a1, args->a2, args->a3, args->a4,
			       args->a5, args->a6, args->a7,
			       spmc_sp_thread_entry);
}
#endif

#ifdef ARM32
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update return values from the RPC; they will appear in
	 * r0-r3 when the thread is resumed.
	 */
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
}
#endif /*ARM32*/

#ifdef ARM64
static void copy_a0_to_a3(struct thread_ctx_regs *regs, uint32_t a0,
			  uint32_t a1, uint32_t a2, uint32_t a3)
{
	/*
	 * Update return values from the RPC; they will appear in
	 * x0-x3 when the thread is resumed.
	 */
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
}
#endif /*ARM64*/

#ifdef ARM32
static bool is_from_user(uint32_t cpsr)
{
	return (cpsr & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_USR;
}
#endif

#ifdef ARM64
static bool is_from_user(uint32_t cpsr)
{
	if (cpsr & (SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT))
		return true;
	if (((cpsr >> SPSR_64_MODE_EL_SHIFT) & SPSR_64_MODE_EL_MASK) ==
	    SPSR_64_MODE_EL0)
		return true;
	return false;
}
#endif

#ifdef CFG_SYSCALL_FTRACE
static void __noprof ftrace_suspend(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = true;
}

static void __noprof ftrace_resume(void)
{
	struct ts_session *s = TAILQ_FIRST(&thread_get_tsd()->sess_stack);

	if (s && s->fbuf)
		s->fbuf->syscall_trace_suspended = false;
}
#else
static void __noprof ftrace_suspend(void)
{
}

static void __noprof ftrace_resume(void)
{
}
#endif

static bool is_user_mode(struct thread_ctx_regs *regs)
{
	return is_from_user((uint32_t)regs->cpsr);
}

void thread_resume_from_rpc(uint32_t thread_id, uint32_t a0, uint32_t a1,
			    uint32_t a2, uint32_t a3)
{
	size_t n = thread_id;
	struct thread_core_local *l = thread_get_core_local();
	bool found_thread = false;

	assert(l->curr_thread == THREAD_ID_INVALID);

	thread_lock_global();

	if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {
		threads[n].state = THREAD_STATE_ACTIVE;
		found_thread = true;
	}

	thread_unlock_global();

	if (!found_thread)
		return;

	l->curr_thread = n;

	if (threads[n].have_user_map) {
		core_mmu_set_user_map(&threads[n].user_map);
		if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_resume();
	}

	if (is_user_mode(&threads[n].regs))
		tee_ta_update_session_utime_resume();

	/*
	 * A return from an RPC that requested service of a foreign
	 * interrupt must not fetch parameters from the non-secure world.
	 */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_lazy_save_ns_vfp();

	if (threads[n].have_user_map)
		ftrace_resume();

	l->flags &= ~THREAD_CLF_TMP;
	thread_resume(&threads[n].regs);
	/*NOTREACHED*/
	panic();
}
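
/*
 * Illustrative life cycle of a standard call (summarizing the functions
 * above and thread_state_suspend() below):
 *
 *	thread_alloc_and_run()      pick a THREAD_STATE_FREE thread and
 *	                            run it until it blocks on an RPC
 *	thread_state_suspend()      mark it SUSPENDED and return its ID,
 *	                            which is passed to the normal world
 *	thread_resume_from_rpc(id)  reactivate it once the RPC result is
 *	                            back, copying a0-a3 only when
 *	                            THREAD_FLAGS_COPY_ARGS_ON_RETURN is set
 */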

void __nostackcheck *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	/*
	 * Called from assembly when switching to the temporary stack, so
	 * flags need updating
	 */
	l->flags |= THREAD_CLF_TMP;

	return (void *)l->tmp_stack_va_end;
}

#ifdef ARM64
vaddr_t thread_get_saved_thread_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);
	return threads[ct].kern_sp;
}
#endif /*ARM64*/

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == THREAD_ID_INVALID)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool ret = false;

	if (l->flags & THREAD_CLF_TMP) {
		if (hard)
			*start = GET_STACK_TOP_HARD(stack_tmp, pos);
		else
			*start = GET_STACK_TOP_SOFT(stack_tmp, pos);
		*end = GET_STACK_BOTTOM(stack_tmp, pos);
		ret = true;
	} else if (l->flags & THREAD_CLF_ABORT) {
		if (hard)
			*start = GET_STACK_TOP_HARD(stack_abt, pos);
		else
			*start = GET_STACK_TOP_SOFT(stack_abt, pos);
		*end = GET_STACK_BOTTOM(stack_abt, pos);
		ret = true;
	} else if (!l->flags) {
		if (ct < 0 || ct >= CFG_NUM_THREADS)
			goto out;

		*end = threads[ct].stack_va_end;
		*start = *end - STACK_THREAD_SIZE;
		if (!hard)
			*start += STACK_CHECK_EXTRA;
		ret = true;
	}
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}
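
/*
 * Illustrative use of get_stack_limits(): the limits of whatever stack the
 * caller currently runs on can be dumped like this (compare
 * check_stack_limits() above, which uses the get_stack_soft_limits()
 * wrapper):
 *
 *	vaddr_t start = 0;
 *	vaddr_t end = 0;
 *
 *	if (get_stack_limits(&start, &end, false))
 *		DMSG("stack 0x%" PRIxVA "..0x%" PRIxVA, start, end);
 *
 * where false selects the soft limits; true gives the hard ones, which
 * extend STACK_CHECK_EXTRA bytes further down.
 */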

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

#ifdef ARM32
bool thread_is_in_normal_mode(void)
{
	return (read_cpsr() & ARM32_CPSR_MODE_MASK) == ARM32_CPSR_MODE_SVC;
}
#endif

#ifdef ARM64
bool thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/*
	 * If any bit other than THREAD_CLF_TMP is set in l->flags we're
	 * handling some exception.
	 */
	ret = (l->curr_thread != THREAD_ID_INVALID) &&
	      !(l->flags & ~THREAD_CLF_TMP);
	thread_unmask_exceptions(exceptions);

	return ret;
}
#endif

void thread_state_free(void)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	thread_lazy_restore_ns_vfp();
	tee_pager_release_phys(
		(void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		STACK_THREAD_SIZE);

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
	l->curr_thread = THREAD_ID_INVALID;

#ifdef CFG_VIRTUALIZATION
	virt_unset_guest();
#endif
	thread_unlock_global();
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr,
					uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
	/*
	 * If we're from user mode then thr->regs.sp is the saved user
	 * stack pointer and thr->kern_sp holds the last kernel stack
	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
	 * up to date so we need to read from thr->regs.sp instead.
	 */
	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
	vaddr_t sp = thr->regs.svc_sp;
#endif
	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
	size_t len = sp - base;

	tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
					uint32_t cpsr __unused)
{
}
#endif

int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
{
	struct thread_core_local *l = thread_get_core_local();
	int ct = l->curr_thread;

	assert(ct != THREAD_ID_INVALID);

	if (core_mmu_user_mapping_is_active())
		ftrace_suspend();

	thread_check_canaries();

	release_unused_kernel_stack(threads + ct, cpsr);

	if (is_from_user(cpsr)) {
		thread_user_save_vfp();
		tee_ta_update_session_utime_suspend();
		tee_ta_gprof_sample_pc(pc);
	}
	thread_lazy_restore_ns_vfp();

	thread_lock_global();

	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map) {
		if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR)
			tee_ta_ftrace_update_times_suspend();
		core_mmu_get_user_map(&threads[ct].user_map);
		core_mmu_set_user_map(NULL);
	}

	l->curr_thread = THREAD_ID_INVALID;

#ifdef CFG_VIRTUALIZATION
	virt_unset_guest();
#endif

	thread_unlock_global();

	return ct;
}
However, 957 * we'll need to restore it at different times so store it here. 958 */ 959 l->tmp_stack_va_end = sp; 960 } 961 962 static void set_abt_stack(struct thread_core_local *l, vaddr_t sp) 963 { 964 l->abt_stack_va_end = sp; 965 } 966 #endif /*ARM64*/ 967 968 bool thread_init_stack(uint32_t thread_id, vaddr_t sp) 969 { 970 if (thread_id >= CFG_NUM_THREADS) 971 return false; 972 threads[thread_id].stack_va_end = sp; 973 return true; 974 } 975 976 short int thread_get_id_may_fail(void) 977 { 978 /* 979 * thread_get_core_local() requires foreign interrupts to be disabled 980 */ 981 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 982 struct thread_core_local *l = thread_get_core_local(); 983 short int ct = l->curr_thread; 984 985 thread_unmask_exceptions(exceptions); 986 return ct; 987 } 988 989 short int thread_get_id(void) 990 { 991 short int ct = thread_get_id_may_fail(); 992 993 /* Thread ID has to fit in a short int */ 994 COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX); 995 assert(ct >= 0 && ct < CFG_NUM_THREADS); 996 return ct; 997 } 998 999 #ifdef CFG_WITH_PAGER 1000 static void init_thread_stacks(void) 1001 { 1002 size_t n = 0; 1003 1004 /* 1005 * Allocate virtual memory for thread stacks. 1006 */ 1007 for (n = 0; n < CFG_NUM_THREADS; n++) { 1008 tee_mm_entry_t *mm = NULL; 1009 vaddr_t sp = 0; 1010 size_t num_pages = 0; 1011 struct fobj *fobj = NULL; 1012 1013 /* Find vmem for thread stack and its protection gap */ 1014 mm = tee_mm_alloc(&tee_mm_vcore, 1015 SMALL_PAGE_SIZE + STACK_THREAD_SIZE); 1016 assert(mm); 1017 1018 /* Claim eventual physical page */ 1019 tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm), 1020 true); 1021 1022 num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1; 1023 fobj = fobj_locked_paged_alloc(num_pages); 1024 1025 /* Add the region to the pager */ 1026 tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE, 1027 PAGED_REGION_TYPE_LOCK, fobj); 1028 fobj_put(fobj); 1029 1030 /* init effective stack */ 1031 sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm); 1032 asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp); 1033 if (!thread_init_stack(n, sp)) 1034 panic("init stack failed"); 1035 } 1036 } 1037 #else 1038 static void init_thread_stacks(void) 1039 { 1040 size_t n; 1041 1042 /* Assign the thread stacks */ 1043 for (n = 0; n < CFG_NUM_THREADS; n++) { 1044 if (!thread_init_stack(n, GET_STACK_BOTTOM(stack_thread, n))) 1045 panic("thread_init_stack failed"); 1046 } 1047 } 1048 #endif /*CFG_WITH_PAGER*/ 1049 1050 static void init_user_kcode(void) 1051 { 1052 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0 1053 vaddr_t v = (vaddr_t)thread_excp_vect; 1054 vaddr_t ve = (vaddr_t)thread_excp_vect_end; 1055 1056 thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE); 1057 ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE); 1058 thread_user_kcode_size = ve - thread_user_kcode_va; 1059 1060 core_mmu_get_user_va_range(&v, NULL); 1061 thread_user_kcode_offset = thread_user_kcode_va - v; 1062 1063 #if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64) 1064 /* 1065 * When transitioning to EL0 subtract SP with this much to point to 1066 * this special kdata page instead. SP is restored by add this much 1067 * while transitioning back to EL1. 

static void init_user_kcode(void)
{
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	vaddr_t v = (vaddr_t)thread_excp_vect;
	vaddr_t ve = (vaddr_t)thread_excp_vect_end;

	thread_user_kcode_va = ROUNDDOWN(v, CORE_MMU_USER_CODE_SIZE);
	ve = ROUNDUP(ve, CORE_MMU_USER_CODE_SIZE);
	thread_user_kcode_size = ve - thread_user_kcode_va;

	core_mmu_get_user_va_range(&v, NULL);
	thread_user_kcode_offset = thread_user_kcode_va - v;

#if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
	/*
	 * When transitioning to EL0, subtract this much from SP to point
	 * at this special kdata page instead. SP is restored by adding
	 * the same amount when transitioning back to EL1.
	 */
	v += thread_user_kcode_size;
	thread_user_kdata_sp_offset = (vaddr_t)thread_core_local - v;
#endif
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
}

void thread_init_threads(void)
{
	size_t n = 0;

	init_thread_stacks();
	print_stack_limits();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}
}

void __nostackcheck thread_init_thread_core_local(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;
	}

	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
}

void thread_init_primary(void)
{
	/* Initialize canaries around the stacks */
	init_canaries();

	init_user_kcode();
}

static void init_sec_mon_stack(size_t pos __maybe_unused)
{
#if !defined(CFG_WITH_ARM_TRUSTED_FW)
	/* Initialize secure monitor */
	sm_init(GET_STACK_BOTTOM(stack_tmp, pos));
#endif
}

static uint32_t __maybe_unused get_midr_implementer(uint32_t midr)
{
	return (midr >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
}

static uint32_t __maybe_unused get_midr_primary_part(uint32_t midr)
{
	return (midr >> MIDR_PRIMARY_PART_NUM_SHIFT) &
	       MIDR_PRIMARY_PART_NUM_MASK;
}

#ifdef ARM64
static bool probe_workaround_available(void)
{
	int32_t r;

	r = thread_smc(SMCCC_VERSION, 0, 0, 0);
	if (r < 0)
		return false;
	if (r < 0x10001) /* compare with version 1.1 */
		return false;

	/* Version >= 1.1, so SMCCC_ARCH_FEATURES is available */
	r = thread_smc(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, 0, 0);
	return r >= 0;
}

static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	if (probe_workaround_available()) {
		DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") available",
		     SMCCC_ARCH_WORKAROUND_1);
		DMSG("SMC Workaround for CVE-2017-5715 used");
		return a;
	}

	DMSG("SMCCC_ARCH_WORKAROUND_1 (%#08" PRIx32 ") unavailable",
	     SMCCC_ARCH_WORKAROUND_1);
	DMSG("SMC Workaround for CVE-2017-5715 not needed (if ARM-TF is up to date)");
	return (vaddr_t)thread_excp_vect;
}
#else
static vaddr_t __maybe_unused select_vector(vaddr_t a)
{
	return a;
}
#endif

static vaddr_t get_excp_vect(void)
{
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	uint32_t midr = read_midr();

	if (get_midr_implementer(midr) != MIDR_IMPLEMENTER_ARM)
		return (vaddr_t)thread_excp_vect;

	switch (get_midr_primary_part(midr)) {
#ifdef ARM32
	case CORTEX_A8_PART_NUM:
	case CORTEX_A9_PART_NUM:
	case CORTEX_A17_PART_NUM:
#endif
	case CORTEX_A57_PART_NUM:
	case CORTEX_A72_PART_NUM:
	case CORTEX_A73_PART_NUM:
	case CORTEX_A75_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround);
#ifdef ARM32
	case CORTEX_A15_PART_NUM:
		return select_vector((vaddr_t)thread_excp_vect_workaround_a15);
#endif
	default:
		return (vaddr_t)thread_excp_vect;
	}
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

	return (vaddr_t)thread_excp_vect;
}

void thread_init_per_cpu(void)
{
	size_t pos = get_core_pos();
	struct thread_core_local *l = thread_get_core_local();

	init_sec_mon_stack(pos);

	set_tmp_stack(l, GET_STACK_BOTTOM(stack_tmp, pos) - STACK_TMP_OFFS);
	set_abt_stack(l, GET_STACK_BOTTOM(stack_abt, pos));

	thread_init_vbar(get_excp_vect());

#ifdef CFG_FTRACE_SUPPORT
	/*
	 * Enable accesses to frequency register and physical counter
	 * register in EL0/PL0 required for timestamping during
	 * function tracing.
	 */
	write_cntkctl(read_cntkctl() | CNTKCTL_PL0PCTEN);
#endif
}

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (enable) {
		threads[l->curr_thread].flags |=
			THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
			~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

#ifdef CFG_WITH_VFP
uint32_t thread_kernel_enable_vfp(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (thr->vfp_state.sec_lazy_saved &&
		   !thr->vfp_state.sec_saved) {
		/*
		 * This happens when we're handling an abort while the
		 * thread was using the VFP state.
		 */
		vfp_lazy_save_state_final(&thr->vfp_state.sec,
					  false /*!force_save*/);
		thr->vfp_state.sec_saved = true;
	} else if (tuv && tuv->lazy_saved && !tuv->saved) {
		/*
		 * This can happen either during syscall or abort
		 * processing (while processing a syscall).
		 */
		vfp_lazy_save_state_final(&tuv->vfp, false /*!force_save*/);
		tuv->saved = true;
	}

	vfp_enable();
	return exceptions;
}
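
/*
 * Illustrative pairing for kernel-mode VFP use; the return value of
 * thread_kernel_enable_vfp() carries the saved exception mask that
 * thread_kernel_disable_vfp() below restores:
 *
 *	uint32_t vfp_excep = thread_kernel_enable_vfp();
 *
 *	... use VFP/SIMD registers ...
 *	thread_kernel_disable_vfp(vfp_excep);
 */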

void thread_kernel_disable_vfp(uint32_t state)
{
	uint32_t exceptions;

	assert(vfp_is_enabled());

	vfp_disable();
	exceptions = thread_get_exceptions();
	assert(exceptions & THREAD_EXCP_FOREIGN_INTR);
	exceptions &= ~THREAD_EXCP_FOREIGN_INTR;
	exceptions |= state & THREAD_EXCP_FOREIGN_INTR;
	thread_set_exceptions(exceptions);
}

void thread_kernel_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (vfp_is_enabled()) {
		vfp_lazy_save_state_init(&thr->vfp_state.sec);
		thr->vfp_state.sec_lazy_saved = true;
	}
}

void thread_kernel_restore_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());
	if (thr->vfp_state.sec_lazy_saved) {
		vfp_lazy_restore_state(&thr->vfp_state.sec,
				       thr->vfp_state.sec_saved);
		thr->vfp_state.sec_saved = false;
		thr->vfp_state.sec_lazy_saved = false;
	}
}

void thread_user_enable_vfp(struct thread_user_vfp_state *uvfp)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	assert(!vfp_is_enabled());

	if (!thr->vfp_state.ns_saved) {
		vfp_lazy_save_state_final(&thr->vfp_state.ns,
					  true /*force_save*/);
		thr->vfp_state.ns_saved = true;
	} else if (tuv && uvfp != tuv) {
		if (tuv->lazy_saved && !tuv->saved) {
			vfp_lazy_save_state_final(&tuv->vfp,
						  false /*!force_save*/);
			tuv->saved = true;
		}
	}

	if (uvfp->lazy_saved)
		vfp_lazy_restore_state(&uvfp->vfp, uvfp->saved);
	uvfp->lazy_saved = false;
	uvfp->saved = false;

	thr->vfp_state.uvfp = uvfp;
	vfp_enable();
}

void thread_user_save_vfp(void)
{
	struct thread_ctx *thr = threads + thread_get_id();
	struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);
	if (!vfp_is_enabled())
		return;

	assert(tuv && !tuv->lazy_saved && !tuv->saved);
	vfp_lazy_save_state_init(&tuv->vfp);
	tuv->lazy_saved = true;
}

void thread_user_clear_vfp(struct user_mode_ctx *uctx)
{
	struct thread_user_vfp_state *uvfp = &uctx->vfp;
	struct thread_ctx *thr = threads + thread_get_id();

	if (uvfp == thr->vfp_state.uvfp)
		thr->vfp_state.uvfp = NULL;
	uvfp->lazy_saved = false;
	uvfp->saved = false;
}
#endif /*CFG_WITH_VFP*/

#ifdef ARM32
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (!is_32bit)
		return false;

	s = read_cpsr();
	s &= ~(CPSR_MODE_MASK | CPSR_T | CPSR_IT_MASK1 | CPSR_IT_MASK2);
	s |= CPSR_MODE_USR;
	if (entry_func & 1)
		s |= CPSR_T;
	*spsr = s;
	return true;
}
#endif

#ifdef ARM64
static bool get_spsr(bool is_32bit, unsigned long entry_func, uint32_t *spsr)
{
	uint32_t s;

	if (is_32bit) {
		s = read_daif() & (SPSR_32_AIF_MASK << SPSR_32_AIF_SHIFT);
		s |= SPSR_MODE_RW_32 << SPSR_MODE_RW_SHIFT;
		s |= (entry_func & SPSR_32_T_MASK) << SPSR_32_T_SHIFT;
	} else {
		s = read_daif() & (SPSR_64_DAIF_MASK << SPSR_64_DAIF_SHIFT);
	}

	*spsr = s;
	return true;
}
#endif

static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
			 unsigned long a1, unsigned long a2, unsigned long a3,
			 unsigned long user_sp, unsigned long entry_func,
			 uint32_t spsr)
{
	/*
	 * First clear all registers to avoid leaking information from
	 * other TAs or even the Core itself.
	 */
	*regs = (struct thread_ctx_regs){ };
#ifdef ARM32
	regs->r0 = a0;
	regs->r1 = a1;
	regs->r2 = a2;
	regs->r3 = a3;
	regs->usr_sp = user_sp;
	regs->pc = entry_func;
	regs->cpsr = spsr;
#endif
#ifdef ARM64
	regs->x[0] = a0;
	regs->x[1] = a1;
	regs->x[2] = a2;
	regs->x[3] = a3;
	regs->pc = entry_func;
	regs->cpsr = spsr;
	regs->x[13] = user_sp;	/* Used when running TA in AArch32 */
	regs->sp = user_sp;	/* Used when running TA in AArch64 */
	/* Set frame pointer (user stack can't be unwound past this point) */
	regs->x[29] = 0;
#endif
}

uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
		unsigned long a2, unsigned long a3, unsigned long user_sp,
		unsigned long entry_func, bool is_32bit,
		uint32_t *exit_status0, uint32_t *exit_status1)
{
	uint32_t spsr = 0;
	uint32_t exceptions = 0;
	uint32_t rc = 0;
	struct thread_ctx_regs *regs = NULL;

	tee_ta_update_session_utime_resume();

	/* Derive SPSR from current CPSR/PSTATE readout. */
	if (!get_spsr(is_32bit, entry_func, &spsr)) {
		*exit_status0 = 1; /* panic */
		*exit_status1 = 0xbadbadba;
		return 0;
	}

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	/*
	 * We're using the per thread location of saved context registers
	 * for temporary storage. Now that exceptions are masked they will
	 * not be used for anything else until they are eventually
	 * unmasked when user mode has been entered.
	 */
	regs = thread_get_ctx_regs();
	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr);
	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
	thread_unmask_exceptions(exceptions);
	return rc;
}
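
/*
 * Illustrative call of thread_enter_user_mode() (the real call sites live
 * in the user-TA entry code; the local names here are made up):
 *
 *	uint32_t exit0 = 0;
 *	uint32_t exit1 = 0;
 *	uint32_t ret = thread_enter_user_mode(ta_arg, 0, 0, 0, usr_stack,
 *					      usr_entry, is_32bit,
 *					      &exit0, &exit1);
 *
 * On a panic exit, exit0 is non-zero and exit1 carries the panic code.
 */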

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
void thread_get_user_kcode(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	core_mmu_get_user_va_range(va, NULL);
	*mobj = mobj_tee_ram_rx;
	*offset = thread_user_kcode_va - (vaddr_t)mobj_get_va(*mobj, 0);
	*sz = thread_user_kcode_size;
}
#endif

#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
			   vaddr_t *va, size_t *sz)
{
	vaddr_t v;

	core_mmu_get_user_va_range(&v, NULL);
	*va = v + thread_user_kcode_size;
	*mobj = mobj_tee_ram_rw;
	*offset = (vaddr_t)thread_user_kdata_page -
		  (vaddr_t)mobj_get_va(*mobj, 0);
	*sz = sizeof(thread_user_kdata_page);
}
#endif

static void setup_unwind_user_mode(struct thread_svc_regs *regs)
{
#ifdef ARM32
	regs->lr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = read_cpsr();
#endif
#ifdef ARM64
	regs->elr = (uintptr_t)thread_unwind_user_mode;
	regs->spsr = SPSR_64(SPSR_64_MODE_EL1, SPSR_64_MODE_SP_EL0, 0);
	regs->spsr |= read_daif();
	/*
	 * regs is the value of the stack pointer before calling the SVC
	 * handler, so adding sizeof(*regs) matches the space reserved at
	 * the beginning of el0_sync_svc(). This prepares the stack for
	 * returning to thread_unwind_user_mode instead of a normal
	 * exception return.
	 */
	regs->sp_el0 = (uint64_t)(regs + 1);
#endif
}

static void gprof_set_status(struct ts_session *s __maybe_unused,
			     enum ts_gprof_status status __maybe_unused)
{
#ifdef CFG_TA_GPROF_SUPPORT
	if (s->ctx->ops->gprof_set_status)
		s->ctx->ops->gprof_set_status(status);
#endif
}

/*
 * Note: this function is weak just to make it possible to exclude it from
 * the unpaged area.
 */
void __weak thread_svc_handler(struct thread_svc_regs *regs)
{
	struct ts_session *sess = NULL;
	uint32_t state = 0;

	/* Enable native interrupts */
	state = thread_get_exceptions();
	thread_unmask_exceptions(state & ~THREAD_EXCP_NATIVE_INTR);

	thread_user_save_vfp();

	sess = ts_get_current_session();
	/*
	 * User mode service has just entered kernel mode, suspend gprof
	 * collection until we're about to switch back again.
	 */
	gprof_set_status(sess, TS_GPROF_SUSPEND);

	/* Restore foreign interrupts which are disabled on exception entry */
	thread_restore_foreign_intr();

	assert(sess && sess->handle_svc);
	if (sess->handle_svc(regs)) {
		/* We're about to switch back to user mode */
		gprof_set_status(sess, TS_GPROF_RESUME);
	} else {
		/* We're returning from __thread_enter_user_mode() */
		setup_unwind_user_mode(regs);
	}
}

static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
{
	switch (shm_type) {
	case THREAD_SHM_TYPE_APPLICATION:
		return thread_rpc_alloc_payload(size);
	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
		return thread_rpc_alloc_kernel_payload(size);
	case THREAD_SHM_TYPE_GLOBAL:
		return thread_rpc_alloc_global_payload(size);
	default:
		return NULL;
	}
}

static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
{
	if (ce->mobj) {
		switch (ce->type) {
		case THREAD_SHM_TYPE_APPLICATION:
			thread_rpc_free_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
			thread_rpc_free_kernel_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_GLOBAL:
			thread_rpc_free_global_payload(ce->mobj);
			break;
		default:
			assert(0); /* "can't happen" */
			break;
		}
	}
	ce->mobj = NULL;
	ce->size = 0;
}

static struct thread_shm_cache_entry *
get_shm_cache_entry(enum thread_shm_cache_user user)
{
	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
	struct thread_shm_cache_entry *ce = NULL;

	SLIST_FOREACH(ce, cache, link)
		if (ce->user == user)
			return ce;

	ce = calloc(1, sizeof(*ce));
	if (ce) {
		ce->user = user;
		SLIST_INSERT_HEAD(cache, ce, link);
	}

	return ce;
}

void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj)
{
	struct thread_shm_cache_entry *ce = NULL;
	size_t sz = size;
	paddr_t p = 0;
	void *va = NULL;

	if (!size)
		return NULL;

	ce = get_shm_cache_entry(user);
	if (!ce)
		return NULL;

	/*
	 * Always allocate in page chunks as normal world allocates payload
	 * memory as complete pages.
	 */
	sz = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (ce->type != shm_type || sz > ce->size) {
		clear_shm_cache_entry(ce);

		ce->mobj = alloc_shm(shm_type, sz);
		if (!ce->mobj)
			return NULL;

		if (mobj_get_pa(ce->mobj, 0, 0, &p))
			goto err;

		if (!ALIGNMENT_IS_OK(p, uint64_t))
			goto err;

		va = mobj_get_va(ce->mobj, 0);
		if (!va)
			goto err;

		ce->size = sz;
		ce->type = shm_type;
	} else {
		va = mobj_get_va(ce->mobj, 0);
		if (!va)
			goto err;
	}
	*mobj = ce->mobj;

	return va;
err:
	clear_shm_cache_entry(ce);
	return NULL;
}

void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
{
	while (true) {
		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);

		if (!ce)
			break;
		SLIST_REMOVE_HEAD(cache, link);
		clear_shm_cache_entry(ce);
		free(ce);
	}
}
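
/*
 * Illustrative use of the per-thread shared memory cache above (the cache
 * user ID and the 512-byte size are made up for the example):
 *
 *	struct mobj *mobj = NULL;
 *	void *va = NULL;
 *
 *	va = thread_rpc_shm_cache_alloc(user_id, THREAD_SHM_TYPE_APPLICATION,
 *					512, &mobj);
 *
 * The request is rounded up to a full page; a later call with the same
 * user ID and a size that still fits reuses the cached mobj instead of
 * doing a new RPC allocation.
 */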

#ifdef CFG_WITH_ARM_TRUSTED_FW
/*
 * These five functions are __weak to allow platforms to override them if
 * needed.
 */
unsigned long __weak thread_cpu_off_handler(unsigned long a0 __unused,
					    unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_off_handler);

unsigned long __weak thread_cpu_suspend_handler(unsigned long a0 __unused,
						unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_suspend_handler);

unsigned long __weak thread_cpu_resume_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_cpu_resume_handler);

unsigned long __weak thread_system_off_handler(unsigned long a0 __unused,
					       unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_off_handler);

unsigned long __weak thread_system_reset_handler(unsigned long a0 __unused,
						 unsigned long a1 __unused)
{
	return 0;
}
DECLARE_KEEP_PAGER(thread_system_reset_handler);
#endif /*CFG_WITH_ARM_TRUSTED_FW*/
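
/*
 * Illustrative platform override of one of the weak handlers above, placed
 * in a platform file (the helper name is made up):
 *
 *	unsigned long thread_cpu_suspend_handler(unsigned long a0,
 *						 unsigned long a1)
 *	{
 *		plat_prepare_suspend(a0, a1);	(hypothetical helper)
 *		return 0;
 *	}
 *
 * The strong definition replaces the weak one at link time; a0/a1 carry
 * whatever arguments the PSCI glue passes along.
 */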