// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;

/*
 * Stacks
 *
 * [Lower addresses on the left]
 *
 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
 * ^                     ^                   ^                ^
 * stack_xxx[n]          "hard" top          "soft" top       bottom
 */

static uint32_t start_canary_value = 0xdedede00;
static uint32_t end_canary_value = 0xababab00;

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE,
	      /* global linkage */);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
				    STACK_CANARY_SIZE / 2)
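
/*
 * GET_STACK_BOTTOM() returns the address just past a statically declared
 * stack minus half of STACK_CANARY_SIZE, that is, the "bottom" in the
 * diagram above. The stack_end_va_to_*() helpers below walk back from
 * that address to the soft top, the hard top and the two canary words.
 * The "soft" top leaves STACK_CHECK_EXTRA bytes of headroom so that the
 * stack checking and error reporting code still has stack to run on once
 * the soft limit has been crossed.
 */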

const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the mmu). Hence kept in pager sections.
 */
DECLARE_KEEP_PAGER(stack_tmp_stride);

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

static size_t stack_size_to_alloc_size(size_t stack_size)
{
	return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA,
		       STACK_ALIGNMENT);
}

static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va)
{
	size_t l = stack_size_to_alloc_size(stack_size);

	return end_va - l + STACK_CANARY_SIZE;
}

static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va)
{
	return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA;
}

static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused,
				      vaddr_t end_va)
{
	return end_va;
}

static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va)
{
	return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) -
			    STACK_CANARY_SIZE / 2);
}

static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused,
					    vaddr_t end_va)
{
	return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t));
}

static void init_canaries(size_t stack_size, vaddr_t va_end)
{
	uint32_t *canary = NULL;

	assert(va_end);
	canary = stack_end_va_to_start_canary(stack_size, va_end);
	*canary = start_canary_value;
	canary = stack_end_va_to_end_canary(stack_size, va_end);
	*canary = end_canary_value;
}

void thread_init_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				init_canaries(STACK_TMP_SIZE, va);
			}
			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				init_canaries(STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			va = threads[n].stack_va_end;
			if (va)
				init_canaries(STACK_THREAD_SIZE, va);
		}
	}
}

#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void)
{
	uint32_t canary[2] = { };
	uint32_t exceptions = 0;

	plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary),
				       sizeof(canary[0]));

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	thread_check_canaries();

	start_canary_value = canary[0];
	end_canary_value = canary[1];
	thread_init_canaries();

	thread_unmask_exceptions(exceptions);
}
#endif
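
/*
 * The canary values start out as the compile time constants above so that
 * canaries can be written before the RNG is usable. Once
 * plat_get_random_stack_canaries() can supply random values,
 * thread_update_canaries() first verifies the old canaries and then
 * rewrites them, with all exceptions masked so that nothing else runs on
 * this core in the middle of the update.
 */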

static void check_stack_canary(const char *stack_name __maybe_unused,
			       size_t n __maybe_unused,
			       size_t stack_size, vaddr_t end_va)
{
	uint32_t *canary = NULL;

	canary = stack_end_va_to_start_canary(stack_size, end_va);
	if (*canary != start_canary_value) {
		EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}

	canary = stack_end_va_to_end_canary(stack_size, end_va);
	if (*canary != end_canary_value) {
		EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}
}

void thread_check_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				check_stack_canary("tmp_stack", n,
						   STACK_TMP_SIZE, va);
			}

			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				check_stack_canary("abt_stack", n,
						   STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			va = threads[n].stack_va_end;
			if (va)
				check_stack_canary("thread_stack", n,
						   STACK_THREAD_SIZE, va);
		}
	}
}

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

static struct thread_core_local * __nostackcheck
get_core_local(unsigned int pos)
{
	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(pos < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[pos];
}

struct thread_core_local * __nostackcheck thread_get_core_local(void)
{
	unsigned int pos = get_core_pos();

	return get_core_local(pos);
}

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
static void print_stack_limits(void)
{
	size_t n = 0;
	vaddr_t __maybe_unused start = 0;
	vaddr_t __maybe_unused end = 0;
	vaddr_t va = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS;
		start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va);
		end = stack_end_va_to_bottom(STACK_TMP_SIZE, va);
		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);

		va = thread_core_local[n].abt_stack_va_end;
		start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va);
		end = stack_end_va_to_bottom(STACK_ABT_SIZE, va);
		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		va = threads[n].stack_va_end;
		start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va);
		end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va);
		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
}
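
/*
 * With CFG_CORE_DEBUG_CHECK_STACKS the code is built with compiler
 * function instrumentation: entry into functions not marked
 * __nostackcheck calls __cyg_profile_func_enter() below, which checks
 * the stack pointer against the soft limits of the stack this core is
 * currently executing on. The recursion flag keeps the check from
 * recursing into itself.
 */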

static void check_stack_limits(void)
{
	vaddr_t stack_start = 0;
	vaddr_t stack_end = 0;
	/* Any value in the current stack frame will do */
	vaddr_t current_sp = (vaddr_t)&stack_start;

	if (!get_stack_soft_limits(&stack_start, &stack_end))
		panic("Unknown stack limits");
	if (current_sp < stack_start || current_sp > stack_end) {
		EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%"
		     PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start,
		     stack_end);
		print_stack_limits();
		panic();
	}
}

static bool * __nostackcheck get_stackcheck_recursion_flag(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool *p = NULL;

	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
		p = &l->stackcheck_recursion;
	else if (!l->flags)
		p = &threads[ct].tsd.stackcheck_recursion;

	thread_unmask_exceptions(exceptions);
	return p;
}

void __cyg_profile_func_enter(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
					     void *call_site __unused)
{
	bool *p = get_stackcheck_recursion_flag();

	assert(p);
	if (*p)
		return;
	*p = true;
	check_stack_limits();
	*p = false;
}

void __cyg_profile_func_exit(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
					    void *call_site __unused)
{
}
#else
static void print_stack_limits(void)
{
}
#endif

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	thread_init_threads();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void __nostackcheck thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = THREAD_ID_INVALID;
}

void __nostackcheck *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	/*
	 * Called from assembly when switching to the temporary stack, so flags
	 * need updating
	 */
	l->flags |= THREAD_CLF_TMP;

	return (void *)l->tmp_stack_va_end;
}

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == THREAD_ID_INVALID)
		return 0;

	thr = threads + ct;
	return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end);
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	size_t stack_size = 0;
	bool ret = true;
	vaddr_t va = 0;

	if (l->flags & THREAD_CLF_TMP) {
		va = l->tmp_stack_va_end + STACK_TMP_OFFS;
		stack_size = STACK_TMP_SIZE;
	} else if (l->flags & THREAD_CLF_ABORT) {
		va = l->abt_stack_va_end;
		stack_size = STACK_ABT_SIZE;
	} else if (!l->flags && ct >= 0 && ct < CFG_NUM_THREADS) {
		va = threads[ct].stack_va_end;
		stack_size = STACK_THREAD_SIZE;
	} else {
		ret = false;
		goto out;
	}

	*end = stack_end_va_to_bottom(stack_size, va);
	if (hard)
		*start = stack_end_va_to_top_hard(stack_size, va);
	else
		*start = stack_end_va_to_top_soft(stack_size, va);
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}
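
/*
 * When an exception is taken the previously active thread_core_local
 * flags are kept shifted up by THREAD_CLF_SAVED_SHIFT, so inspecting the
 * saved copy tells what the core was doing before the current exception.
 */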

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

/*
 * This function should always be accurate, but it might be possible to
 * implement a more efficient version depending on the CPU architecture.
 */
bool __weak __noprof thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/*
	 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're
	 * handling some exception.
	 */
	ret = (l->curr_thread != THREAD_ID_INVALID) &&
	      !(l->flags & ~THREAD_CLF_TMP);
	thread_unmask_exceptions(exceptions);

	return ret;
}

short int __noprof thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	short int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

short int __noprof thread_get_id(void)
{
	short int ct = thread_get_id_may_fail();

	/* Thread ID has to fit in a short int */
	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&core_virt_mem_pool,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the region to the pager */
		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					  PAGED_REGION_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		threads[n].stack_va_end = sp;
	}
}
#else
static void init_thread_stacks(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		va = GET_STACK_BOTTOM(stack_thread, n);
		threads[n].stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_THREAD_SIZE, va);
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_threads(void)
{
	size_t n = 0;

	init_thread_stacks();
	print_stack_limits();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++)
		TAILQ_INIT(&threads[n].tsd.sess_stack);
}

vaddr_t __nostackcheck thread_get_abt_stack(void)
{
	return GET_STACK_BOTTOM(stack_abt, get_core_pos());
}
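
/*
 * Two variants of thread_core_local initialization follow. With
 * CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL the entry for the booting core
 * has already been set up earlier during boot, so only the other cores
 * are initialized here, together with their temporary and abort stacks
 * and canaries. Without that option every entry gets its defaults and
 * the per-core stack addresses are assigned separately in
 * thread_init_core_local_stacks().
 */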

#ifdef CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL
void thread_init_thread_core_local(void)
{
	struct thread_core_local *tcl = thread_core_local;
	const size_t core_pos = get_core_pos();
	vaddr_t va = 0;
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		if (n == core_pos)
			continue; /* Already initialized */
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;

		va = GET_STACK_BOTTOM(stack_tmp, n);
		tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_TMP_SIZE, va);
		va = GET_STACK_BOTTOM(stack_abt, n);
		tcl[n].abt_stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_ABT_SIZE, va);
	}
}
#else
void __nostackcheck thread_init_thread_core_local(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;
	}
	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
}

void __nostackcheck thread_init_core_local_stacks(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
					  STACK_TMP_OFFS;
		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
	}
}
#endif /*CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL*/

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void)
{
	size_t n = 0;

	for (n = 0; n < CFG_NUM_THREADS; n++)
		if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys)))
			panic("Failed to init thread pauth keys");
}

void thread_init_core_local_pauth_keys(void)
{
	struct thread_core_local *tcl = thread_core_local;
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys)))
			panic("Failed to init core local pauth keys");
}
#endif

struct thread_specific_data * __noprof thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);
	return &threads[l->curr_thread].regs;
}
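
/*
 * THREAD_FLAGS_FOREIGN_INTR_ENABLE records per thread whether foreign
 * interrupts should be left unmasked while the thread runs.
 * thread_set_foreign_intr() updates both the flag and the current
 * exception mask, while thread_restore_foreign_intr() only re-applies
 * whatever the flag says, for instance after interrupts have been masked
 * temporarily.
 */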

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (enable) {
		threads[l->curr_thread].flags |=
			THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
			~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
{
	switch (shm_type) {
	case THREAD_SHM_TYPE_APPLICATION:
		return thread_rpc_alloc_payload(size);
	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
		return thread_rpc_alloc_kernel_payload(size);
	case THREAD_SHM_TYPE_GLOBAL:
		return thread_rpc_alloc_global_payload(size);
	default:
		return NULL;
	}
}

static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
{
	if (ce->mobj) {
		switch (ce->type) {
		case THREAD_SHM_TYPE_APPLICATION:
			thread_rpc_free_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
			thread_rpc_free_kernel_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_GLOBAL:
			thread_rpc_free_global_payload(ce->mobj);
			break;
		default:
			assert(0); /* "can't happen" */
			break;
		}
	}
	ce->mobj = NULL;
	ce->size = 0;
}

static struct thread_shm_cache_entry *
get_shm_cache_entry(enum thread_shm_cache_user user)
{
	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
	struct thread_shm_cache_entry *ce = NULL;

	SLIST_FOREACH(ce, cache, link)
		if (ce->user == user)
			return ce;

	ce = calloc(1, sizeof(*ce));
	if (ce) {
		ce->user = user;
		SLIST_INSERT_HEAD(cache, ce, link);
	}

	return ce;
}

void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj)
{
	struct thread_shm_cache_entry *ce = NULL;
	size_t sz = size;
	paddr_t p = 0;
	void *va = NULL;

	if (!size)
		return NULL;

	ce = get_shm_cache_entry(user);
	if (!ce)
		return NULL;

	/*
	 * Always allocate in page chunks as normal world allocates payload
	 * memory as complete pages.
	 */
	sz = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (ce->type != shm_type || sz > ce->size) {
		clear_shm_cache_entry(ce);

		ce->mobj = alloc_shm(shm_type, sz);
		if (!ce->mobj)
			return NULL;

		if (mobj_get_pa(ce->mobj, 0, 0, &p))
			goto err;

		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
			goto err;

		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;

		ce->size = sz;
		ce->type = shm_type;
	} else {
		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;
	}
	*mobj = ce->mobj;

	return va;
err:
	clear_shm_cache_entry(ce);
	return NULL;
}

void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
{
	while (true) {
		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);

		if (!ce)
			break;
		SLIST_REMOVE_HEAD(cache, link);
		clear_shm_cache_entry(ce);
		free(ce);
	}
}
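
/*
 * A minimal usage sketch of the per-thread SHM cache above, for code that
 * repeatedly needs a bounce buffer for RPC to normal world. The cache
 * user ID and the size are assumptions made only for the example:
 *
 *	struct mobj *mobj = NULL;
 *	void *buf = NULL;
 *
 *	buf = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					 THREAD_SHM_TYPE_APPLICATION,
 *					 4096, &mobj);
 *	if (buf) {
 *		// fill buf and pass mobj to normal world via RPC
 *	}
 *
 * The cached mobj stays owned by the thread and is reused or reallocated
 * on the next call; thread_rpc_shm_cache_clear() releases it when the
 * thread's cache is torn down.
 */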