1 // SPDX-License-Identifier: BSD-2-Clause 2 /* 3 * Copyright (c) 2016-2022, Linaro Limited 4 * Copyright (c) 2014, STMicroelectronics International N.V. 5 * Copyright (c) 2020-2021, Arm Limited 6 */ 7 8 #include <config.h> 9 #include <crypto/crypto.h> 10 #include <kernel/asan.h> 11 #include <kernel/boot.h> 12 #include <kernel/lockdep.h> 13 #include <kernel/misc.h> 14 #include <kernel/panic.h> 15 #include <kernel/spinlock.h> 16 #include <kernel/thread.h> 17 #include <kernel/thread_private.h> 18 #include <mm/mobj.h> 19 #include <mm/page_alloc.h> 20 #include <stdalign.h> 21 22 struct thread_ctx threads[CFG_NUM_THREADS]; 23 24 #if defined(CFG_DYN_STACK_CONFIG) 25 struct thread_core_local *thread_core_local __nex_bss; 26 size_t thread_core_count __nex_bss; 27 #else 28 static struct thread_core_local 29 __thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss; 30 struct thread_core_local *thread_core_local __nex_data = __thread_core_local; 31 size_t thread_core_count __nex_data = CFG_TEE_CORE_NB_CORE; 32 #endif 33 unsigned long thread_core_local_pa __nex_bss; 34 struct thread_core_local *__thread_core_local_new __nex_bss; 35 size_t __thread_core_count_new __nex_bss; 36 37 /* 38 * Stacks 39 * 40 * [Lower addresses on the left] 41 * 42 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ] 43 * ^ ^ ^ ^ 44 * stack_xxx[n] "hard" top "soft" top bottom 45 */ 46 47 static uint32_t start_canary_value = 0xdedede00; 48 static uint32_t end_canary_value = 0xababab00; 49 50 #define DECLARE_STACK(name, num_stacks, stack_size, linkage) \ 51 linkage uint32_t name[num_stacks] \ 52 [ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \ 53 STACK_ALIGNMENT) / sizeof(uint32_t)] \ 54 __attribute__((section(".nozi_stack." # name), \ 55 aligned(STACK_ALIGNMENT))) 56 57 #ifndef CFG_DYN_STACK_CONFIG 58 DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, 59 /* global linkage */); 60 DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static); 61 #define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \ 62 STACK_CANARY_SIZE / 2) 63 #else 64 /* Not used */ 65 #define GET_STACK_BOTTOM(stack, n) 0 66 #endif 67 #ifndef CFG_WITH_PAGER 68 DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static); 69 #define GET_STACK_THREAD_BOTTOM(n) \ 70 ((vaddr_t)&stack_thread[n] + sizeof(stack_thread[n]) - \ 71 STACK_CANARY_SIZE / 2) 72 #endif 73 74 75 #ifndef CFG_DYN_STACK_CONFIG 76 const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") = 77 sizeof(stack_tmp[0]); 78 79 /* 80 * This stack setup info is required by secondary boot cores before they 81 * each locally enable the pager (the mmu). Hence kept in pager sections. 82 */ 83 DECLARE_KEEP_PAGER(stack_tmp_stride); 84 #endif 85 86 static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK; 87 88 static size_t stack_size_to_alloc_size(size_t stack_size) 89 { 90 return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, 91 STACK_ALIGNMENT); 92 } 93 94 static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va) 95 { 96 size_t l = stack_size_to_alloc_size(stack_size); 97 98 return end_va - l + STACK_CANARY_SIZE; 99 } 100 101 static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va) 102 { 103 return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA; 104 } 105 106 static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused, 107 vaddr_t end_va) 108 { 109 return end_va; 110 } 111 112 static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va) 113 { 114 return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) - 115 STACK_CANARY_SIZE / 2); 116 } 117 118 static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused, 119 vaddr_t end_va) 120 { 121 return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t)); 122 } 123 124 static void init_canaries(size_t stack_size, vaddr_t va_end) 125 { 126 uint32_t *canary = NULL; 127 128 assert(va_end); 129 canary = stack_end_va_to_start_canary(stack_size, va_end); 130 *canary = start_canary_value; 131 canary = stack_end_va_to_end_canary(stack_size, va_end); 132 *canary = end_canary_value; 133 } 134 135 void thread_init_canaries(void) 136 { 137 vaddr_t va = 0; 138 size_t n = 0; 139 140 if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) { 141 for (n = 0; n < thread_core_count; n++) { 142 if (thread_core_local[n].tmp_stack_va_end) { 143 va = thread_core_local[n].tmp_stack_va_end + 144 STACK_TMP_OFFS; 145 init_canaries(STACK_TMP_SIZE, va); 146 } 147 va = thread_core_local[n].abt_stack_va_end; 148 if (va) 149 init_canaries(STACK_ABT_SIZE, va); 150 } 151 152 } 153 154 if (IS_ENABLED(CFG_WITH_STACK_CANARIES) && 155 !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 156 for (n = 0; n < CFG_NUM_THREADS; n++) { 157 va = threads[n].stack_va_end; 158 if (va) 159 init_canaries(STACK_THREAD_SIZE, va); 160 } 161 } 162 } 163 164 #if defined(CFG_WITH_STACK_CANARIES) 165 void thread_update_canaries(void) 166 { 167 uint32_t canary[2] = { }; 168 uint32_t exceptions = 0; 169 170 plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary), 171 sizeof(canary[0])); 172 173 exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 174 175 thread_check_canaries(); 176 177 start_canary_value = canary[0]; 178 end_canary_value = canary[1]; 179 thread_init_canaries(); 180 181 thread_unmask_exceptions(exceptions); 182 } 183 #endif 184 185 static void check_stack_canary(const char *stack_name __maybe_unused, 186 size_t n __maybe_unused, 187 size_t stack_size, vaddr_t end_va) 188 { 189 uint32_t *canary = NULL; 190 191 canary = stack_end_va_to_start_canary(stack_size, end_va); 192 if (*canary != start_canary_value) { 193 EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)", 194 stack_name, n, (void *)canary); 195 panic(); 196 } 197 198 canary = stack_end_va_to_end_canary(stack_size, end_va); 199 if (*canary != end_canary_value) { 200 EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)", 201 stack_name, n, (void *)canary); 202 panic(); 203 } 204 } 205 206 void thread_check_canaries(void) 207 { 208 vaddr_t va = 0; 209 size_t n = 0; 210 211 if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) { 212 for (n = 0; n < thread_core_count; n++) { 213 if (thread_core_local[n].tmp_stack_va_end) { 214 va = thread_core_local[n].tmp_stack_va_end + 215 STACK_TMP_OFFS; 216 check_stack_canary("tmp_stack", n, 217 STACK_TMP_SIZE, va); 218 } 219 220 va = thread_core_local[n].abt_stack_va_end; 221 if (va) 222 check_stack_canary("abt_stack", n, 223 STACK_ABT_SIZE, va); 224 } 225 } 226 227 if (IS_ENABLED(CFG_WITH_STACK_CANARIES) && 228 !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 229 for (n = 0; n < CFG_NUM_THREADS; n++) { 230 va = threads[n].stack_va_end; 231 if (va) 232 check_stack_canary("thread_stack", n, 233 STACK_THREAD_SIZE, va); 234 } 235 } 236 } 237 238 void thread_lock_global(void) 239 { 240 cpu_spin_lock(&thread_global_lock); 241 } 242 243 void thread_unlock_global(void) 244 { 245 cpu_spin_unlock(&thread_global_lock); 246 } 247 248 static struct thread_core_local * __nostackcheck 249 get_core_local(unsigned int pos) 250 { 251 /* 252 * Foreign interrupts must be disabled before playing with core_local 253 * since we otherwise may be rescheduled to a different core in the 254 * middle of this function. 255 */ 256 assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 257 258 /* 259 * With CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL, we boot on a 260 * single core and have allocated only one struct thread_core_local 261 * so we return that regardless of pos. 262 */ 263 if (IS_ENABLED(CFG_DYN_STACK_CONFIG) && 264 thread_core_local != __thread_core_local_new) 265 return thread_core_local; 266 267 assert(pos < thread_core_count); 268 return &thread_core_local[pos]; 269 } 270 271 struct thread_core_local * __nostackcheck thread_get_core_local(void) 272 { 273 unsigned int pos = get_core_pos(); 274 275 return get_core_local(pos); 276 } 277 278 #ifdef CFG_CORE_DEBUG_CHECK_STACKS 279 static void print_stack_limits(void) 280 { 281 size_t n = 0; 282 vaddr_t __maybe_unused start = 0; 283 vaddr_t __maybe_unused end = 0; 284 vaddr_t va = 0; 285 286 for (n = 0; n < thread_core_count; n++) { 287 va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS; 288 start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va); 289 end = stack_end_va_to_bottom(STACK_TMP_SIZE, va); 290 DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); 291 292 va = thread_core_local[n].abt_stack_va_end; 293 start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va); 294 end = stack_end_va_to_bottom(STACK_ABT_SIZE, va); 295 DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); 296 } 297 298 for (n = 0; n < CFG_NUM_THREADS; n++) { 299 va = threads[n].stack_va_end; 300 start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va); 301 end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va); 302 DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); 303 } 304 } 305 306 static void check_stack_limits(void) 307 { 308 vaddr_t stack_start = 0; 309 vaddr_t stack_end = 0; 310 /* Any value in the current stack frame will do */ 311 vaddr_t current_sp = (vaddr_t)&stack_start; 312 313 if (!get_stack_soft_limits(&stack_start, &stack_end)) 314 panic("Unknown stack limits"); 315 if (current_sp < stack_start || current_sp > stack_end) { 316 EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%" 317 PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start, 318 stack_end); 319 print_stack_limits(); 320 panic(); 321 } 322 } 323 324 static bool * __nostackcheck get_stackcheck_recursion_flag(void) 325 { 326 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 327 unsigned int pos = get_core_pos(); 328 struct thread_core_local *l = get_core_local(pos); 329 int ct = l->curr_thread; 330 bool *p = NULL; 331 332 if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP)) 333 p = &l->stackcheck_recursion; 334 else if (!l->flags) 335 p = &threads[ct].tsd.stackcheck_recursion; 336 337 thread_unmask_exceptions(exceptions); 338 return p; 339 } 340 341 void __cyg_profile_func_enter(void *this_fn, void *call_site); 342 void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused, 343 void *call_site __unused) 344 { 345 bool *p = get_stackcheck_recursion_flag(); 346 347 assert(p); 348 if (*p) 349 return; 350 *p = true; 351 check_stack_limits(); 352 *p = false; 353 } 354 355 void __cyg_profile_func_exit(void *this_fn, void *call_site); 356 void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused, 357 void *call_site __unused) 358 { 359 } 360 #else 361 static void print_stack_limits(void) 362 { 363 } 364 #endif 365 366 void thread_init_boot_thread(void) 367 { 368 struct thread_core_local *l = thread_get_core_local(); 369 370 thread_init_threads(); 371 372 l->curr_thread = 0; 373 threads[0].state = THREAD_STATE_ACTIVE; 374 } 375 376 void __nostackcheck thread_clr_boot_thread(void) 377 { 378 struct thread_core_local *l = thread_get_core_local(); 379 380 assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS); 381 assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE); 382 threads[l->curr_thread].state = THREAD_STATE_FREE; 383 l->curr_thread = THREAD_ID_INVALID; 384 print_stack_limits(); 385 } 386 387 void __nostackcheck *thread_get_tmp_sp(void) 388 { 389 struct thread_core_local *l = thread_get_core_local(); 390 391 /* 392 * Called from assembly when switching to the temporary stack, so flags 393 * need updating 394 */ 395 l->flags |= THREAD_CLF_TMP; 396 397 return (void *)l->tmp_stack_va_end; 398 } 399 400 vaddr_t thread_stack_start(void) 401 { 402 struct thread_ctx *thr; 403 int ct = thread_get_id_may_fail(); 404 405 if (ct == THREAD_ID_INVALID) 406 return 0; 407 408 thr = threads + ct; 409 return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end); 410 } 411 412 size_t thread_stack_size(void) 413 { 414 return STACK_THREAD_SIZE; 415 } 416 417 bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard) 418 { 419 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 420 unsigned int pos = get_core_pos(); 421 struct thread_core_local *l = get_core_local(pos); 422 int ct = l->curr_thread; 423 size_t stack_size = 0; 424 bool ret = true; 425 vaddr_t va = 0; 426 427 if (l->flags & THREAD_CLF_TMP) { 428 va = l->tmp_stack_va_end + STACK_TMP_OFFS; 429 stack_size = STACK_TMP_SIZE; 430 } else if (l->flags & THREAD_CLF_ABORT) { 431 va = l->abt_stack_va_end; 432 stack_size = STACK_ABT_SIZE; 433 } else if (!l->flags && ct >= 0 && ct < CFG_NUM_THREADS) { 434 va = threads[ct].stack_va_end; 435 stack_size = STACK_THREAD_SIZE; 436 } else { 437 ret = false; 438 goto out; 439 } 440 441 *end = stack_end_va_to_bottom(stack_size, va); 442 if (hard) 443 *start = stack_end_va_to_top_hard(stack_size, va); 444 else 445 *start = stack_end_va_to_top_soft(stack_size, va); 446 out: 447 thread_unmask_exceptions(exceptions); 448 return ret; 449 } 450 451 bool thread_is_from_abort_mode(void) 452 { 453 struct thread_core_local *l = thread_get_core_local(); 454 455 return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT; 456 } 457 458 /* 459 * This function should always be accurate, but it might be possible to 460 * implement a more efficient depending on cpu architecture. 461 */ 462 bool __weak __noprof thread_is_in_normal_mode(void) 463 { 464 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 465 struct thread_core_local *l = thread_get_core_local(); 466 bool ret; 467 468 /* 469 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're 470 * handling some exception. 471 */ 472 ret = (l->curr_thread != THREAD_ID_INVALID) && 473 !(l->flags & ~THREAD_CLF_TMP); 474 thread_unmask_exceptions(exceptions); 475 476 return ret; 477 } 478 479 short int __noprof thread_get_id_may_fail(void) 480 { 481 /* 482 * thread_get_core_local() requires foreign interrupts to be disabled 483 */ 484 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 485 struct thread_core_local *l = thread_get_core_local(); 486 short int ct = l->curr_thread; 487 488 thread_unmask_exceptions(exceptions); 489 return ct; 490 } 491 492 short int __noprof thread_get_id(void) 493 { 494 short int ct = thread_get_id_may_fail(); 495 496 /* Thread ID has to fit in a short int */ 497 COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX); 498 assert(ct >= 0 && ct < CFG_NUM_THREADS); 499 return ct; 500 } 501 502 #ifdef CFG_WITH_PAGER 503 static void init_thread_stacks(void) 504 { 505 size_t n = 0; 506 507 /* 508 * Allocate virtual memory for thread stacks. 509 */ 510 for (n = 0; n < CFG_NUM_THREADS; n++) { 511 tee_mm_entry_t *mm = NULL; 512 vaddr_t sp = 0; 513 size_t num_pages = 0; 514 struct fobj *fobj = NULL; 515 516 /* Find vmem for thread stack and its protection gap */ 517 mm = tee_mm_alloc(&core_virt_mem_pool, 518 SMALL_PAGE_SIZE + STACK_THREAD_SIZE); 519 assert(mm); 520 521 /* Claim eventual physical page */ 522 tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm), 523 true); 524 525 num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1; 526 fobj = fobj_locked_paged_alloc(num_pages); 527 528 /* Add the region to the pager */ 529 tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE, 530 PAGED_REGION_TYPE_LOCK, fobj); 531 fobj_put(fobj); 532 533 /* init effective stack */ 534 sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm); 535 asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp); 536 threads[n].stack_va_end = sp; 537 } 538 } 539 #else 540 static void init_thread_stacks(void) 541 { 542 vaddr_t va = 0; 543 size_t n = 0; 544 545 /* Assign the thread stacks */ 546 for (n = 0; n < CFG_NUM_THREADS; n++) { 547 va = GET_STACK_THREAD_BOTTOM(n); 548 threads[n].stack_va_end = va; 549 if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) 550 init_canaries(STACK_THREAD_SIZE, va); 551 } 552 } 553 #endif /*CFG_WITH_PAGER*/ 554 555 void thread_init_threads(void) 556 { 557 size_t n = 0; 558 559 init_thread_stacks(); 560 print_stack_limits(); 561 pgt_init(); 562 563 mutex_lockdep_init(); 564 565 for (n = 0; n < CFG_NUM_THREADS; n++) 566 TAILQ_INIT(&threads[n].tsd.sess_stack); 567 } 568 569 #ifndef CFG_DYN_STACK_CONFIG 570 vaddr_t __nostackcheck thread_get_abt_stack(void) 571 { 572 return GET_STACK_BOTTOM(stack_abt, get_core_pos()); 573 } 574 #endif 575 576 #ifdef CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL 577 static vaddr_t alloc_stack(size_t stack_size, bool nex) 578 { 579 size_t l = stack_size_to_alloc_size(stack_size); 580 size_t rl = ROUNDUP(l, SMALL_PAGE_SIZE); 581 uint32_t flags = MAF_GUARD_HEAD; 582 vaddr_t end_va = 0; 583 vaddr_t va = 0; 584 585 if (nex) 586 flags |= MAF_NEX; 587 va = virt_page_alloc(rl / SMALL_PAGE_SIZE, flags); 588 if (!va) 589 panic(); 590 591 end_va = va + l - STACK_CANARY_SIZE / 2; 592 if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) 593 init_canaries(stack_size, end_va); 594 595 return end_va; 596 } 597 598 void thread_init_thread_core_local(size_t core_count) 599 { 600 struct thread_core_local *tcl = NULL; 601 const size_t core_pos = get_core_pos(); 602 vaddr_t va = 0; 603 size_t n = 0; 604 605 if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) { 606 assert(core_count <= CFG_TEE_CORE_NB_CORE); 607 tcl = nex_calloc(core_count, sizeof(*tcl)); 608 if (!tcl) 609 panic(); 610 __thread_core_local_new = tcl; 611 __thread_core_count_new = core_count; 612 } else { 613 tcl = thread_core_local; 614 assert(core_count == CFG_TEE_CORE_NB_CORE); 615 616 for (n = 0; n < thread_core_count; n++) { 617 init_canaries(STACK_TMP_SIZE, 618 GET_STACK_BOTTOM(stack_tmp, n)); 619 init_canaries(STACK_ABT_SIZE, 620 GET_STACK_BOTTOM(stack_abt, n)); 621 } 622 } 623 624 for (n = 0; n < core_count; n++) { 625 if (n == core_pos) { 626 if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) 627 tcl[n] = thread_core_local[0]; 628 else 629 continue; 630 } else { 631 tcl[n].curr_thread = THREAD_ID_INVALID; 632 tcl[n].flags = THREAD_CLF_TMP; 633 } 634 635 if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) 636 va = alloc_stack(STACK_TMP_SIZE, true); 637 else 638 va = GET_STACK_BOTTOM(stack_tmp, n); 639 tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS; 640 #ifdef ARM32 641 tcl[n].tmp_stack_pa_end = 642 vaddr_to_phys(tcl[n].tmp_stack_va_end); 643 #endif 644 645 if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) 646 va = alloc_stack(STACK_ABT_SIZE, true); 647 else 648 va = GET_STACK_BOTTOM(stack_abt, n); 649 tcl[n].abt_stack_va_end = va; 650 } 651 } 652 #else 653 void __nostackcheck 654 thread_init_thread_core_local(size_t core_count __maybe_unused) 655 { 656 size_t n = 0; 657 struct thread_core_local *tcl = thread_core_local; 658 659 assert(core_count == CFG_TEE_CORE_NB_CORE); 660 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { 661 tcl[n].curr_thread = THREAD_ID_INVALID; 662 tcl[n].flags = THREAD_CLF_TMP; 663 } 664 tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0); 665 } 666 667 void __nostackcheck thread_init_core_local_stacks(void) 668 { 669 size_t n = 0; 670 struct thread_core_local *tcl = thread_core_local; 671 672 for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { 673 tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) - 674 STACK_TMP_OFFS; 675 tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n); 676 } 677 } 678 #endif /*CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL*/ 679 680 #if defined(CFG_CORE_PAUTH) 681 void thread_init_thread_pauth_keys(void) 682 { 683 size_t n = 0; 684 685 for (n = 0; n < CFG_NUM_THREADS; n++) 686 if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys))) 687 panic("Failed to init thread pauth keys"); 688 } 689 690 void thread_init_core_local_pauth_keys(void) 691 { 692 struct thread_core_local *tcl = thread_core_local; 693 size_t n = 0; 694 695 for (n = 0; n < thread_core_count; n++) 696 if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys))) 697 panic("Failed to init core local pauth keys"); 698 } 699 #endif 700 701 struct thread_specific_data * __noprof thread_get_tsd(void) 702 { 703 return &threads[thread_get_id()].tsd; 704 } 705 706 struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void) 707 { 708 struct thread_core_local *l = thread_get_core_local(); 709 710 assert(l->curr_thread != THREAD_ID_INVALID); 711 return &threads[l->curr_thread].regs; 712 } 713 714 void thread_set_foreign_intr(bool enable) 715 { 716 /* thread_get_core_local() requires foreign interrupts to be disabled */ 717 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 718 struct thread_core_local *l; 719 720 l = thread_get_core_local(); 721 722 assert(l->curr_thread != THREAD_ID_INVALID); 723 724 if (enable) { 725 threads[l->curr_thread].flags |= 726 THREAD_FLAGS_FOREIGN_INTR_ENABLE; 727 thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 728 } else { 729 /* 730 * No need to disable foreign interrupts here since they're 731 * already disabled above. 732 */ 733 threads[l->curr_thread].flags &= 734 ~THREAD_FLAGS_FOREIGN_INTR_ENABLE; 735 } 736 } 737 738 void thread_restore_foreign_intr(void) 739 { 740 /* thread_get_core_local() requires foreign interrupts to be disabled */ 741 uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 742 struct thread_core_local *l; 743 744 l = thread_get_core_local(); 745 746 assert(l->curr_thread != THREAD_ID_INVALID); 747 748 if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE) 749 thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 750 } 751 752 static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size) 753 { 754 switch (shm_type) { 755 case THREAD_SHM_TYPE_APPLICATION: 756 return thread_rpc_alloc_payload(size); 757 case THREAD_SHM_TYPE_KERNEL_PRIVATE: 758 return thread_rpc_alloc_kernel_payload(size); 759 case THREAD_SHM_TYPE_GLOBAL: 760 return thread_rpc_alloc_global_payload(size); 761 default: 762 return NULL; 763 } 764 } 765 766 static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce) 767 { 768 if (ce->mobj) { 769 switch (ce->type) { 770 case THREAD_SHM_TYPE_APPLICATION: 771 thread_rpc_free_payload(ce->mobj); 772 break; 773 case THREAD_SHM_TYPE_KERNEL_PRIVATE: 774 thread_rpc_free_kernel_payload(ce->mobj); 775 break; 776 case THREAD_SHM_TYPE_GLOBAL: 777 thread_rpc_free_global_payload(ce->mobj); 778 break; 779 default: 780 assert(0); /* "can't happen" */ 781 break; 782 } 783 } 784 ce->mobj = NULL; 785 ce->size = 0; 786 } 787 788 static struct thread_shm_cache_entry * 789 get_shm_cache_entry(enum thread_shm_cache_user user) 790 { 791 struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache; 792 struct thread_shm_cache_entry *ce = NULL; 793 794 SLIST_FOREACH(ce, cache, link) 795 if (ce->user == user) 796 return ce; 797 798 ce = calloc(1, sizeof(*ce)); 799 if (ce) { 800 ce->user = user; 801 SLIST_INSERT_HEAD(cache, ce, link); 802 } 803 804 return ce; 805 } 806 807 void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user, 808 enum thread_shm_type shm_type, 809 size_t size, struct mobj **mobj) 810 { 811 struct thread_shm_cache_entry *ce = NULL; 812 size_t sz = size; 813 paddr_t p = 0; 814 void *va = NULL; 815 816 if (!size) 817 return NULL; 818 819 ce = get_shm_cache_entry(user); 820 if (!ce) 821 return NULL; 822 823 /* 824 * Always allocate in page chunks as normal world allocates payload 825 * memory as complete pages. 826 */ 827 sz = ROUNDUP(size, SMALL_PAGE_SIZE); 828 829 if (ce->type != shm_type || sz > ce->size) { 830 clear_shm_cache_entry(ce); 831 832 ce->mobj = alloc_shm(shm_type, sz); 833 if (!ce->mobj) 834 return NULL; 835 836 if (mobj_get_pa(ce->mobj, 0, 0, &p)) 837 goto err; 838 839 if (!IS_ALIGNED_WITH_TYPE(p, uint64_t)) 840 goto err; 841 842 va = mobj_get_va(ce->mobj, 0, sz); 843 if (!va) 844 goto err; 845 846 ce->size = sz; 847 ce->type = shm_type; 848 } else { 849 va = mobj_get_va(ce->mobj, 0, sz); 850 if (!va) 851 goto err; 852 } 853 *mobj = ce->mobj; 854 855 return va; 856 err: 857 clear_shm_cache_entry(ce); 858 return NULL; 859 } 860 861 void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache) 862 { 863 while (true) { 864 struct thread_shm_cache_entry *ce = SLIST_FIRST(cache); 865 866 if (!ce) 867 break; 868 SLIST_REMOVE_HEAD(cache, link); 869 clear_shm_cache_entry(ce); 870 free(ce); 871 } 872 } 873