// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>
#include <mm/page_alloc.h>
#include <stdalign.h>

#if defined(CFG_DYN_STACK_CONFIG)
struct thread_core_local *thread_core_local __nex_bss;
size_t thread_core_count __nex_bss;
struct thread_ctx *threads;
size_t thread_count;
#else
static struct thread_core_local
	__thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;
struct thread_core_local *thread_core_local __nex_data = __thread_core_local;
size_t thread_core_count __nex_data = CFG_TEE_CORE_NB_CORE;
static struct thread_ctx __threads[CFG_NUM_THREADS];
struct thread_ctx *threads = __threads;
size_t thread_count = CFG_NUM_THREADS;
#endif
unsigned long thread_core_local_pa __nex_bss;
struct thread_core_local *__thread_core_local_new __nex_bss;
size_t __thread_core_count_new __nex_bss;

/*
 * Stacks
 *
 * [Lower addresses on the left]
 *
 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
 * ^                     ^                   ^                ^
 * stack_xxx[n]          "hard" top          "soft" top       bottom
 */

static uint32_t start_canary_value = 0xdedede00;
static uint32_t end_canary_value = 0xababab00;

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

#ifndef CFG_DYN_STACK_CONFIG
DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE,
	      /* global linkage */);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
				    STACK_CANARY_SIZE / 2)
#else
/* Not used */
#define GET_STACK_BOTTOM(stack, n) 0
#endif

#if defined(CFG_DYN_STACK_CONFIG) || defined(CFG_WITH_PAGER)
/* Not used */
#define GET_STACK_THREAD_BOTTOM(n) 0
#else
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#define GET_STACK_THREAD_BOTTOM(n) \
	((vaddr_t)&stack_thread[n] + sizeof(stack_thread[n]) - \
	 STACK_CANARY_SIZE / 2)
#endif

#ifndef CFG_DYN_STACK_CONFIG
const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the mmu). Hence kept in pager sections.
 */
DECLARE_KEEP_PAGER(stack_tmp_stride);
#endif

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

static size_t stack_size_to_alloc_size(size_t stack_size)
{
	return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA,
		       STACK_ALIGNMENT);
}

static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va)
{
	size_t l = stack_size_to_alloc_size(stack_size);

	return end_va - l + STACK_CANARY_SIZE;
}

static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va)
{
	return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA;
}

static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused,
				      vaddr_t end_va)
{
	return end_va;
}

static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va)
{
	return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) -
			    STACK_CANARY_SIZE / 2);
}

static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused,
					    vaddr_t end_va)
{
	return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t));
}
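
/*
 * Illustrative sketch, not part of the original file: it restates the
 * "Stacks" picture near the top of this file in terms of the helpers
 * above.  For a stack whose allocation starts at alloc_va, alloc_stack()
 * further down derives
 *   end_va = alloc_va + stack_size_to_alloc_size(sz) - STACK_CANARY_SIZE / 2
 * and the helpers then give back
 *   hard top     = alloc_va + STACK_CANARY_SIZE / 2
 *   soft top     = hard top + STACK_CHECK_EXTRA
 *   start canary = alloc_va (first word of the buffer)
 *   end canary   = last word of the buffer
 * The function name is made up for this sketch; it returns the usable
 * bytes between the soft top and the bottom, which is the requested
 * stack size plus whatever the ROUNDUP() to STACK_ALIGNMENT added.
 */
static size_t __maybe_unused stack_usable_bytes(size_t stack_size,
						vaddr_t end_va)
{
	return stack_end_va_to_bottom(stack_size, end_va) -
	       stack_end_va_to_top_soft(stack_size, end_va);
}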

static void init_canaries(size_t stack_size, vaddr_t va_end)
{
	uint32_t *canary = NULL;

	assert(va_end);
	canary = stack_end_va_to_start_canary(stack_size, va_end);
	*canary = start_canary_value;
	canary = stack_end_va_to_end_canary(stack_size, va_end);
	*canary = end_canary_value;
}

void thread_init_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < thread_core_count; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				init_canaries(STACK_TMP_SIZE, va);
			}
			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				init_canaries(STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) &&
	    !IS_ENABLED(CFG_NS_VIRTUALIZATION) && threads) {
		for (n = 0; n < thread_count; n++) {
			va = threads[n].stack_va_end;
			if (va)
				init_canaries(STACK_THREAD_SIZE, va);
		}
	}
}

#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void)
{
	uint32_t canary[2] = { };
	uint32_t exceptions = 0;

	plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary),
				       sizeof(canary[0]));

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	thread_check_canaries();

	start_canary_value = canary[0];
	end_canary_value = canary[1];
	thread_init_canaries();

	thread_unmask_exceptions(exceptions);
}
#endif

static void check_stack_canary(const char *stack_name __maybe_unused,
			       size_t n __maybe_unused,
			       size_t stack_size, vaddr_t end_va)
{
	uint32_t *canary = NULL;

	canary = stack_end_va_to_start_canary(stack_size, end_va);
	if (*canary != start_canary_value) {
		EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}

	canary = stack_end_va_to_end_canary(stack_size, end_va);
	if (*canary != end_canary_value) {
		EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}
}

void thread_check_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < thread_core_count; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				check_stack_canary("tmp_stack", n,
						   STACK_TMP_SIZE, va);
			}

			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				check_stack_canary("abt_stack", n,
						   STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < thread_count; n++) {
			va = threads[n].stack_va_end;
			if (va)
				check_stack_canary("thread_stack", n,
						   STACK_THREAD_SIZE, va);
		}
	}
}

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

static struct thread_core_local * __nostackcheck
get_core_local(unsigned int pos)
{
	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	/*
	 * With CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL, we boot on a
	 * single core and have allocated only one struct thread_core_local
	 * so we return that regardless of pos.
	 */
	if (IS_ENABLED(CFG_DYN_STACK_CONFIG) &&
	    thread_core_local != __thread_core_local_new)
		return thread_core_local;

	assert(pos < thread_core_count);
	return &thread_core_local[pos];
}

struct thread_core_local * __nostackcheck thread_get_core_local(void)
{
	unsigned int pos = get_core_pos();

	return get_core_local(pos);
}
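
/*
 * Illustrative sketch, not part of the original file: the calling
 * protocol described in get_core_local() above.  Code that is not
 * already running with foreign interrupts masked must mask them around
 * the access so it cannot migrate to another core in the middle;
 * thread_get_id_may_fail() further down follows the same pattern.  The
 * function name is made up for this example.
 */
static bool __maybe_unused core_runs_on_tmp_stack(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool res = l->flags & THREAD_CLF_TMP;

	thread_unmask_exceptions(exceptions);
	return res;
}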

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
static void print_stack_limits(void)
{
	size_t n = 0;
	vaddr_t __maybe_unused start = 0;
	vaddr_t __maybe_unused end = 0;
	vaddr_t va = 0;

	for (n = 0; n < thread_core_count; n++) {
		va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS;
		start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va);
		end = stack_end_va_to_bottom(STACK_TMP_SIZE, va);
		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);

		va = thread_core_local[n].abt_stack_va_end;
		start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va);
		end = stack_end_va_to_bottom(STACK_ABT_SIZE, va);
		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}

	for (n = 0; n < thread_count; n++) {
		va = threads[n].stack_va_end;
		start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va);
		end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va);
		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
}

static void check_stack_limits(void)
{
	vaddr_t stack_start = 0;
	vaddr_t stack_end = 0;
	/* Any value in the current stack frame will do */
	vaddr_t current_sp = (vaddr_t)&stack_start;

	if (!get_stack_soft_limits(&stack_start, &stack_end))
		panic("Unknown stack limits");
	if (current_sp < stack_start || current_sp > stack_end) {
		EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%"
		     PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start,
		     stack_end);
		print_stack_limits();
		panic();
	}
}

static bool * __nostackcheck get_stackcheck_recursion_flag(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool *p = NULL;

	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
		p = &l->stackcheck_recursion;
	else if (!l->flags)
		p = &threads[ct].tsd.stackcheck_recursion;

	thread_unmask_exceptions(exceptions);
	return p;
}

/*
 * Hook emitted by the compiler on function entry when the code is built
 * with function instrumentation (-finstrument-functions); used here to
 * verify that the stack pointer stays within the soft limits.
 */
void __cyg_profile_func_enter(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
					     void *call_site __unused)
{
	bool *p = get_stackcheck_recursion_flag();

	assert(p);
	if (*p)
		return;
	*p = true;
	check_stack_limits();
	*p = false;
}

void __cyg_profile_func_exit(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
					    void *call_site __unused)
{
}
#else
static void print_stack_limits(void)
{
}
#endif

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void __nostackcheck thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = THREAD_ID_INVALID;
	print_stack_limits();
}

void __nostackcheck *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	/*
	 * Called from assembly when switching to the temporary stack, so flags
	 * need updating
	 */
	l->flags |= THREAD_CLF_TMP;

	return (void *)l->tmp_stack_va_end;
}

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == THREAD_ID_INVALID)
		return 0;

	thr = threads + ct;
	return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end);
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	size_t stack_size = 0;
	bool ret = true;
	vaddr_t va = 0;

	if (l->flags & THREAD_CLF_TMP) {
		va = l->tmp_stack_va_end + STACK_TMP_OFFS;
		stack_size = STACK_TMP_SIZE;
	} else if (l->flags & THREAD_CLF_ABORT) {
		va = l->abt_stack_va_end;
		stack_size = STACK_ABT_SIZE;
	} else if (!l->flags && ct >= 0 && (size_t)ct < thread_count) {
		va = threads[ct].stack_va_end;
		stack_size = STACK_THREAD_SIZE;
	} else {
		ret = false;
		goto out;
	}

	*end = stack_end_va_to_bottom(stack_size, va);
	if (hard)
		*start = stack_end_va_to_top_hard(stack_size, va);
	else
		*start = stack_end_va_to_top_soft(stack_size, va);
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}
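
/*
 * Illustrative sketch, not part of the original file: how a debug helper
 * could use get_stack_limits() above to estimate how much of the current
 * stack remains.  The function name is made up for this example; it
 * returns 0 when the limits are unknown, e.g. when no thread context is
 * active on this core.
 */
static size_t __maybe_unused stack_bytes_left(bool hard)
{
	vaddr_t start = 0;
	vaddr_t end = 0;
	/* Any address within the current stack frame will do */
	vaddr_t sp = (vaddr_t)&start;

	if (!get_stack_limits(&start, &end, hard))
		return 0;
	if (sp < start || sp > end)
		return 0;

	/*
	 * The stack grows towards lower addresses, so the remaining room
	 * is the distance from the stack pointer down to the top limit.
	 */
	return sp - start;
}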

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

/*
 * This function should always be accurate, but it might be possible to
 * implement a more efficient one depending on the CPU architecture.
 */
bool __weak __noprof thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/*
	 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're
	 * handling some exception.
	 */
	ret = (l->curr_thread != THREAD_ID_INVALID) &&
	      !(l->flags & ~THREAD_CLF_TMP);
	thread_unmask_exceptions(exceptions);

	return ret;
}

short int __noprof thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	short int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

short int __noprof thread_get_id(void)
{
	short int ct = thread_get_id_may_fail();

	/* Thread ID has to fit in a short int */
	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

static vaddr_t alloc_stack(size_t stack_size, bool nex)
{
	size_t l = stack_size_to_alloc_size(stack_size);
	size_t rl = ROUNDUP(l, SMALL_PAGE_SIZE);
	uint32_t flags = MAF_GUARD_HEAD;
	vaddr_t end_va = 0;
	vaddr_t va = 0;

	if (nex)
		flags |= MAF_NEX;
	va = virt_page_alloc(rl / SMALL_PAGE_SIZE, flags);
	if (!va)
		panic();

	end_va = va + l - STACK_CANARY_SIZE / 2;
	if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
		init_canaries(stack_size, end_va);

	return end_va;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < thread_count; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&core_virt_mem_pool,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the region to the pager */
		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					  PAGED_REGION_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		threads[n].stack_va_end = sp;
	}
}
#else
static void init_thread_stacks(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	/* Assign the thread stacks */
	for (n = 0; n < thread_count; n++) {
		if (IS_ENABLED(CFG_DYN_STACK_CONFIG))
			va = alloc_stack(STACK_THREAD_SIZE, false);
		else
			va = GET_STACK_THREAD_BOTTOM(n);
		threads[n].stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_THREAD_SIZE, va);
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_threads(size_t count)
{
	size_t n = 0;

	if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) {
		assert(count <= CFG_NUM_THREADS);
		threads = calloc(count, sizeof(*threads));
		if (!threads)
			panic();
		thread_count = count;
	} else {
		assert(count == CFG_NUM_THREADS);
	}

	init_thread_stacks();
	print_stack_limits();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < thread_count; n++)
		TAILQ_INIT(&threads[n].tsd.sess_stack);
}

#ifndef CFG_DYN_STACK_CONFIG
vaddr_t __nostackcheck thread_get_abt_stack(void)
{
	return GET_STACK_BOTTOM(stack_abt, get_core_pos());
}
#endif

#ifdef CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL
void thread_init_thread_core_local(size_t core_count)
{
	struct thread_core_local *tcl = NULL;
	const size_t core_pos = get_core_pos();
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) {
		assert(core_count <= CFG_TEE_CORE_NB_CORE);
		tcl = nex_calloc(core_count, sizeof(*tcl));
		if (!tcl)
			panic();
		__thread_core_local_new = tcl;
		__thread_core_count_new = core_count;
	} else {
		tcl = thread_core_local;
		assert(core_count == CFG_TEE_CORE_NB_CORE);

		for (n = 0; n < thread_core_count; n++) {
			init_canaries(STACK_TMP_SIZE,
				      GET_STACK_BOTTOM(stack_tmp, n));
			init_canaries(STACK_ABT_SIZE,
				      GET_STACK_BOTTOM(stack_abt, n));
		}
	}

	for (n = 0; n < core_count; n++) {
		if (n == core_pos) {
			if (IS_ENABLED(CFG_DYN_STACK_CONFIG))
				tcl[n] = thread_core_local[0];
			else
				continue;
		} else {
			tcl[n].curr_thread = THREAD_ID_INVALID;
			tcl[n].flags = THREAD_CLF_TMP;
		}

		if (IS_ENABLED(CFG_DYN_STACK_CONFIG))
			va = alloc_stack(STACK_TMP_SIZE, true);
		else
			va = GET_STACK_BOTTOM(stack_tmp, n);
		tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS;
#ifdef ARM32
		tcl[n].tmp_stack_pa_end =
			vaddr_to_phys(tcl[n].tmp_stack_va_end);
#endif

		if (IS_ENABLED(CFG_DYN_STACK_CONFIG))
			va = alloc_stack(STACK_ABT_SIZE, true);
		else
			va = GET_STACK_BOTTOM(stack_abt, n);
		tcl[n].abt_stack_va_end = va;
	}
}
#else
void __nostackcheck
thread_init_thread_core_local(size_t core_count __maybe_unused)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	assert(core_count == CFG_TEE_CORE_NB_CORE);
	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;
	}
	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
}

void __nostackcheck thread_init_core_local_stacks(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
					  STACK_TMP_OFFS;
		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
	}
}
#endif /*CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL*/

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void)
{
	size_t n = 0;

	for (n = 0; n < thread_count; n++)
		if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys)))
			panic("Failed to init thread pauth keys");
}

void thread_init_core_local_pauth_keys(void)
{
	struct thread_core_local *tcl = thread_core_local;
	size_t n = 0;

	for (n = 0; n < thread_core_count; n++)
		if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys)))
			panic("Failed to init core local pauth keys");
}
#endif

struct thread_specific_data * __noprof thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (enable) {
		threads[l->curr_thread].flags |=
			THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
			~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}
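
/*
 * Illustrative sketch, not part of the original file: the intended
 * pairing with thread_restore_foreign_intr() above.  Thread context code
 * that has to mask foreign interrupts around a short critical section
 * does not blindly unmask them afterwards; it asks the function above to
 * re-apply whatever THREAD_FLAGS_FOREIGN_INTR_ENABLE says for the
 * current thread.  The function name is made up for this example.
 */
static void __maybe_unused critical_section_example(void)
{
	(void)thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);

	/* ... work that must not be preempted by foreign interrupts ... */

	/*
	 * Unmasks foreign interrupts again only if the current thread had
	 * them enabled.
	 */
	thread_restore_foreign_intr();
}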

static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
{
	switch (shm_type) {
	case THREAD_SHM_TYPE_APPLICATION:
		return thread_rpc_alloc_payload(size);
	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
		return thread_rpc_alloc_kernel_payload(size);
	case THREAD_SHM_TYPE_GLOBAL:
		return thread_rpc_alloc_global_payload(size);
	default:
		return NULL;
	}
}

static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
{
	if (ce->mobj) {
		switch (ce->type) {
		case THREAD_SHM_TYPE_APPLICATION:
			thread_rpc_free_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
			thread_rpc_free_kernel_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_GLOBAL:
			thread_rpc_free_global_payload(ce->mobj);
			break;
		default:
			assert(0); /* "can't happen" */
			break;
		}
	}
	ce->mobj = NULL;
	ce->size = 0;
}

static struct thread_shm_cache_entry *
get_shm_cache_entry(enum thread_shm_cache_user user)
{
	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
	struct thread_shm_cache_entry *ce = NULL;

	SLIST_FOREACH(ce, cache, link)
		if (ce->user == user)
			return ce;

	ce = calloc(1, sizeof(*ce));
	if (ce) {
		ce->user = user;
		SLIST_INSERT_HEAD(cache, ce, link);
	}

	return ce;
}

void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj)
{
	struct thread_shm_cache_entry *ce = NULL;
	size_t sz = size;
	paddr_t p = 0;
	void *va = NULL;

	if (!size)
		return NULL;

	ce = get_shm_cache_entry(user);
	if (!ce)
		return NULL;

	/*
	 * Always allocate in page chunks as normal world allocates payload
	 * memory as complete pages.
	 */
	sz = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (ce->type != shm_type || sz > ce->size) {
		clear_shm_cache_entry(ce);

		ce->mobj = alloc_shm(shm_type, sz);
		if (!ce->mobj)
			return NULL;

		if (mobj_get_pa(ce->mobj, 0, 0, &p))
			goto err;

		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
			goto err;

		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;

		ce->size = sz;
		ce->type = shm_type;
	} else {
		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;
	}
	*mobj = ce->mobj;

	return va;
err:
	clear_shm_cache_entry(ce);
	return NULL;
}

void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
{
	while (true) {
		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);

		if (!ce)
			break;
		SLIST_REMOVE_HEAD(cache, link);
		clear_shm_cache_entry(ce);
		free(ce);
	}
}
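
/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to use the per-thread cache above.  The cache user ID and the
 * 256 byte message size are just example values for this sketch.  The
 * returned buffer remains owned by the cache, so the caller must not
 * free it; it is reclaimed when thread_rpc_shm_cache_clear() runs for
 * the thread's cache.
 */
static void * __maybe_unused shm_cache_usage_example(struct mobj **mobj)
{
	/*
	 * The requested 256 bytes are rounded up to a full page since the
	 * normal world allocates payload memory in page units.
	 */
	return thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_SOCKET,
					  THREAD_SHM_TYPE_APPLICATION,
					  256, mobj);
}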