1ca825890SJens Wiklander // SPDX-License-Identifier: BSD-2-Clause 2ca825890SJens Wiklander /* 393dc6b29SJens Wiklander * Copyright (c) 2016-2022, Linaro Limited 4ca825890SJens Wiklander * Copyright (c) 2014, STMicroelectronics International N.V. 5ca825890SJens Wiklander * Copyright (c) 2020-2021, Arm Limited 6ca825890SJens Wiklander */ 7ca825890SJens Wiklander 8ca825890SJens Wiklander #include <config.h> 993dc6b29SJens Wiklander #include <crypto/crypto.h> 10ca825890SJens Wiklander #include <kernel/asan.h> 11b89b3da2SVincent Chuang #include <kernel/boot.h> 12ca825890SJens Wiklander #include <kernel/lockdep.h> 13ca825890SJens Wiklander #include <kernel/misc.h> 14ca825890SJens Wiklander #include <kernel/panic.h> 15ca825890SJens Wiklander #include <kernel/spinlock.h> 16ca825890SJens Wiklander #include <kernel/thread.h> 17ca825890SJens Wiklander #include <kernel/thread_private.h> 18ca825890SJens Wiklander #include <mm/mobj.h> 1959724f22SJens Wiklander #include <mm/page_alloc.h> 2059724f22SJens Wiklander #include <stdalign.h> 21ca825890SJens Wiklander 2259724f22SJens Wiklander #if defined(CFG_DYN_STACK_CONFIG) 2359724f22SJens Wiklander struct thread_core_local *thread_core_local __nex_bss; 2459724f22SJens Wiklander size_t thread_core_count __nex_bss; 25*aa0620cfSJens Wiklander struct thread_ctx *threads; 26*aa0620cfSJens Wiklander size_t thread_count; 2759724f22SJens Wiklander #else 28a4c2e0cbSJens Wiklander static struct thread_core_local 29a4c2e0cbSJens Wiklander __thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss; 30a4c2e0cbSJens Wiklander struct thread_core_local *thread_core_local __nex_data = __thread_core_local; 31a4c2e0cbSJens Wiklander size_t thread_core_count __nex_data = CFG_TEE_CORE_NB_CORE; 32*aa0620cfSJens Wiklander static struct thread_ctx __threads[CFG_NUM_THREADS]; 33*aa0620cfSJens Wiklander struct thread_ctx *threads = __threads; 3491d4649dSJens Wiklander size_t thread_count = CFG_NUM_THREADS; 35*aa0620cfSJens Wiklander #endif 36a4c2e0cbSJens Wiklander unsigned long thread_core_local_pa __nex_bss; 3759724f22SJens Wiklander struct thread_core_local *__thread_core_local_new __nex_bss; 3859724f22SJens Wiklander size_t __thread_core_count_new __nex_bss; 39ca825890SJens Wiklander 40ca825890SJens Wiklander /* 41ca825890SJens Wiklander * Stacks 42ca825890SJens Wiklander * 43ca825890SJens Wiklander * [Lower addresses on the left] 44ca825890SJens Wiklander * 45ca825890SJens Wiklander * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ] 46ca825890SJens Wiklander * ^ ^ ^ ^ 47ca825890SJens Wiklander * stack_xxx[n] "hard" top "soft" top bottom 48ca825890SJens Wiklander */ 49ca825890SJens Wiklander 50b89b3da2SVincent Chuang static uint32_t start_canary_value = 0xdedede00; 51b89b3da2SVincent Chuang static uint32_t end_canary_value = 0xababab00; 52ca825890SJens Wiklander 53ca825890SJens Wiklander #define DECLARE_STACK(name, num_stacks, stack_size, linkage) \ 54ca825890SJens Wiklander linkage uint32_t name[num_stacks] \ 55ca825890SJens Wiklander [ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \ 56ca825890SJens Wiklander STACK_ALIGNMENT) / sizeof(uint32_t)] \ 57ca825890SJens Wiklander __attribute__((section(".nozi_stack." # name), \ 58ca825890SJens Wiklander aligned(STACK_ALIGNMENT))) 59ca825890SJens Wiklander 6059724f22SJens Wiklander #ifndef CFG_DYN_STACK_CONFIG 615956c77eSJerome Forissier DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE, 625956c77eSJerome Forissier /* global linkage */); 63ca825890SJens Wiklander DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static); 64ca825890SJens Wiklander #define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \ 65ca825890SJens Wiklander STACK_CANARY_SIZE / 2) 6659724f22SJens Wiklander #else 6759724f22SJens Wiklander /* Not used */ 6859724f22SJens Wiklander #define GET_STACK_BOTTOM(stack, n) 0 6959724f22SJens Wiklander #endif 70*aa0620cfSJens Wiklander 71*aa0620cfSJens Wiklander #if defined(CFG_DYN_STACK_CONFIG) || defined(CFG_WITH_PAGER) 72*aa0620cfSJens Wiklander /* Not used */ 73*aa0620cfSJens Wiklander #define GET_STACK_THREAD_BOTTOM(n) 0 74*aa0620cfSJens Wiklander #else 7559724f22SJens Wiklander DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static); 7659724f22SJens Wiklander #define GET_STACK_THREAD_BOTTOM(n) \ 7759724f22SJens Wiklander ((vaddr_t)&stack_thread[n] + sizeof(stack_thread[n]) - \ 7859724f22SJens Wiklander STACK_CANARY_SIZE / 2) 7959724f22SJens Wiklander #endif 80ca825890SJens Wiklander 8159724f22SJens Wiklander #ifndef CFG_DYN_STACK_CONFIG 82ca825890SJens Wiklander const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") = 83ca825890SJens Wiklander sizeof(stack_tmp[0]); 84ca825890SJens Wiklander 85ca825890SJens Wiklander /* 86528dabb2SJerome Forissier * This stack setup info is required by secondary boot cores before they 87ca825890SJens Wiklander * each locally enable the pager (the mmu). Hence kept in pager sections. 88ca825890SJens Wiklander */ 89ca825890SJens Wiklander DECLARE_KEEP_PAGER(stack_tmp_stride); 9059724f22SJens Wiklander #endif 91ca825890SJens Wiklander 92ca825890SJens Wiklander static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK; 93ca825890SJens Wiklander 9405994c76SJens Wiklander static size_t stack_size_to_alloc_size(size_t stack_size) 9505994c76SJens Wiklander { 9605994c76SJens Wiklander return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, 9705994c76SJens Wiklander STACK_ALIGNMENT); 9805994c76SJens Wiklander } 9905994c76SJens Wiklander 10005994c76SJens Wiklander static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va) 10105994c76SJens Wiklander { 10205994c76SJens Wiklander size_t l = stack_size_to_alloc_size(stack_size); 10305994c76SJens Wiklander 10405994c76SJens Wiklander return end_va - l + STACK_CANARY_SIZE; 10505994c76SJens Wiklander } 10605994c76SJens Wiklander 10705994c76SJens Wiklander static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va) 10805994c76SJens Wiklander { 10905994c76SJens Wiklander return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA; 11005994c76SJens Wiklander } 11105994c76SJens Wiklander 11205994c76SJens Wiklander static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused, 11305994c76SJens Wiklander vaddr_t end_va) 11405994c76SJens Wiklander { 11505994c76SJens Wiklander return end_va; 11605994c76SJens Wiklander } 11705994c76SJens Wiklander 11805994c76SJens Wiklander static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va) 11905994c76SJens Wiklander { 12005994c76SJens Wiklander return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) - 12105994c76SJens Wiklander STACK_CANARY_SIZE / 2); 12205994c76SJens Wiklander } 12305994c76SJens Wiklander 12405994c76SJens Wiklander static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused, 12505994c76SJens Wiklander vaddr_t end_va) 12605994c76SJens Wiklander { 12705994c76SJens Wiklander return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t)); 12805994c76SJens Wiklander } 12905994c76SJens Wiklander 130ad94da2aSJens Wiklander static void init_canaries(size_t stack_size, vaddr_t va_end) 131ca825890SJens Wiklander { 132ad94da2aSJens Wiklander uint32_t *canary = NULL; 133ad94da2aSJens Wiklander 134ad94da2aSJens Wiklander assert(va_end); 135ad94da2aSJens Wiklander canary = stack_end_va_to_start_canary(stack_size, va_end); 136ad94da2aSJens Wiklander *canary = start_canary_value; 137ad94da2aSJens Wiklander canary = stack_end_va_to_end_canary(stack_size, va_end); 138ad94da2aSJens Wiklander *canary = end_canary_value; 139ca825890SJens Wiklander } 140ca825890SJens Wiklander 141ad94da2aSJens Wiklander void thread_init_canaries(void) 142ad94da2aSJens Wiklander { 143ad94da2aSJens Wiklander vaddr_t va = 0; 144ad94da2aSJens Wiklander size_t n = 0; 145ad94da2aSJens Wiklander 146ad94da2aSJens Wiklander if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) { 14759724f22SJens Wiklander for (n = 0; n < thread_core_count; n++) { 148ad94da2aSJens Wiklander if (thread_core_local[n].tmp_stack_va_end) { 149ad94da2aSJens Wiklander va = thread_core_local[n].tmp_stack_va_end + 150ad94da2aSJens Wiklander STACK_TMP_OFFS; 151ad94da2aSJens Wiklander init_canaries(STACK_TMP_SIZE, va); 152ad94da2aSJens Wiklander } 153ad94da2aSJens Wiklander va = thread_core_local[n].abt_stack_va_end; 154ad94da2aSJens Wiklander if (va) 155ad94da2aSJens Wiklander init_canaries(STACK_ABT_SIZE, va); 156ad94da2aSJens Wiklander } 157ad94da2aSJens Wiklander 158ad94da2aSJens Wiklander } 159ad94da2aSJens Wiklander 160ad94da2aSJens Wiklander if (IS_ENABLED(CFG_WITH_STACK_CANARIES) && 161*aa0620cfSJens Wiklander !IS_ENABLED(CFG_WITH_PAGER) && 162*aa0620cfSJens Wiklander !IS_ENABLED(CFG_NS_VIRTUALIZATION) && threads) { 163*aa0620cfSJens Wiklander for (n = 0; n < thread_count; n++) { 164ad94da2aSJens Wiklander va = threads[n].stack_va_end; 165ad94da2aSJens Wiklander if (va) 166ad94da2aSJens Wiklander init_canaries(STACK_THREAD_SIZE, va); 167ad94da2aSJens Wiklander } 168ad94da2aSJens Wiklander } 169ca825890SJens Wiklander } 170ca825890SJens Wiklander 171b89b3da2SVincent Chuang #if defined(CFG_WITH_STACK_CANARIES) 172b89b3da2SVincent Chuang void thread_update_canaries(void) 173b89b3da2SVincent Chuang { 174b89b3da2SVincent Chuang uint32_t canary[2] = { }; 175b89b3da2SVincent Chuang uint32_t exceptions = 0; 176b89b3da2SVincent Chuang 177b89b3da2SVincent Chuang plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary), 178b89b3da2SVincent Chuang sizeof(canary[0])); 179b89b3da2SVincent Chuang 180b89b3da2SVincent Chuang exceptions = thread_mask_exceptions(THREAD_EXCP_ALL); 181b89b3da2SVincent Chuang 182b89b3da2SVincent Chuang thread_check_canaries(); 183b89b3da2SVincent Chuang 184b89b3da2SVincent Chuang start_canary_value = canary[0]; 185b89b3da2SVincent Chuang end_canary_value = canary[1]; 186b89b3da2SVincent Chuang thread_init_canaries(); 187b89b3da2SVincent Chuang 188b89b3da2SVincent Chuang thread_unmask_exceptions(exceptions); 189b89b3da2SVincent Chuang } 190b89b3da2SVincent Chuang #endif 191b89b3da2SVincent Chuang 19205994c76SJens Wiklander static void check_stack_canary(const char *stack_name __maybe_unused, 19305994c76SJens Wiklander size_t n __maybe_unused, 19405994c76SJens Wiklander size_t stack_size, vaddr_t end_va) 19505994c76SJens Wiklander { 19605994c76SJens Wiklander uint32_t *canary = NULL; 19705994c76SJens Wiklander 19805994c76SJens Wiklander canary = stack_end_va_to_start_canary(stack_size, end_va); 19905994c76SJens Wiklander if (*canary != start_canary_value) { 20005994c76SJens Wiklander EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)", 20105994c76SJens Wiklander stack_name, n, (void *)canary); 20205994c76SJens Wiklander panic(); 20305994c76SJens Wiklander } 20405994c76SJens Wiklander 20505994c76SJens Wiklander canary = stack_end_va_to_end_canary(stack_size, end_va); 20605994c76SJens Wiklander if (*canary != end_canary_value) { 20705994c76SJens Wiklander EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)", 20805994c76SJens Wiklander stack_name, n, (void *)canary); 20905994c76SJens Wiklander panic(); 21005994c76SJens Wiklander } 21105994c76SJens Wiklander } 212ca825890SJens Wiklander 213ca825890SJens Wiklander void thread_check_canaries(void) 214ca825890SJens Wiklander { 21505994c76SJens Wiklander vaddr_t va = 0; 216ca825890SJens Wiklander size_t n = 0; 217ca825890SJens Wiklander 21805994c76SJens Wiklander if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) { 21959724f22SJens Wiklander for (n = 0; n < thread_core_count; n++) { 220ad94da2aSJens Wiklander if (thread_core_local[n].tmp_stack_va_end) { 22105994c76SJens Wiklander va = thread_core_local[n].tmp_stack_va_end + 22205994c76SJens Wiklander STACK_TMP_OFFS; 223ad94da2aSJens Wiklander check_stack_canary("tmp_stack", n, 224ad94da2aSJens Wiklander STACK_TMP_SIZE, va); 225ad94da2aSJens Wiklander } 22605994c76SJens Wiklander 22705994c76SJens Wiklander va = thread_core_local[n].abt_stack_va_end; 228ad94da2aSJens Wiklander if (va) 229ad94da2aSJens Wiklander check_stack_canary("abt_stack", n, 230ad94da2aSJens Wiklander STACK_ABT_SIZE, va); 23105994c76SJens Wiklander } 232ca825890SJens Wiklander } 233ca825890SJens Wiklander 23405994c76SJens Wiklander if (IS_ENABLED(CFG_WITH_STACK_CANARIES) && 23505994c76SJens Wiklander !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) { 236*aa0620cfSJens Wiklander for (n = 0; n < thread_count; n++) { 23705994c76SJens Wiklander va = threads[n].stack_va_end; 238ad94da2aSJens Wiklander if (va) 23905994c76SJens Wiklander check_stack_canary("thread_stack", n, 24005994c76SJens Wiklander STACK_THREAD_SIZE, va); 241ca825890SJens Wiklander } 242ca825890SJens Wiklander } 243ca825890SJens Wiklander } 244ca825890SJens Wiklander 245ca825890SJens Wiklander void thread_lock_global(void) 246ca825890SJens Wiklander { 247ca825890SJens Wiklander cpu_spin_lock(&thread_global_lock); 248ca825890SJens Wiklander } 249ca825890SJens Wiklander 250ca825890SJens Wiklander void thread_unlock_global(void) 251ca825890SJens Wiklander { 252ca825890SJens Wiklander cpu_spin_unlock(&thread_global_lock); 253ca825890SJens Wiklander } 254ca825890SJens Wiklander 255ca825890SJens Wiklander static struct thread_core_local * __nostackcheck 256ca825890SJens Wiklander get_core_local(unsigned int pos) 257ca825890SJens Wiklander { 258ca825890SJens Wiklander /* 259ca825890SJens Wiklander * Foreign interrupts must be disabled before playing with core_local 260ca825890SJens Wiklander * since we otherwise may be rescheduled to a different core in the 261ca825890SJens Wiklander * middle of this function. 262ca825890SJens Wiklander */ 263ca825890SJens Wiklander assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR); 264ca825890SJens Wiklander 26559724f22SJens Wiklander /* 26659724f22SJens Wiklander * With CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL, we boot on a 26759724f22SJens Wiklander * single core and have allocated only one struct thread_core_local 26859724f22SJens Wiklander * so we return that regardless of pos. 26959724f22SJens Wiklander */ 27059724f22SJens Wiklander if (IS_ENABLED(CFG_DYN_STACK_CONFIG) && 27159724f22SJens Wiklander thread_core_local != __thread_core_local_new) 27259724f22SJens Wiklander return thread_core_local; 27359724f22SJens Wiklander 27459724f22SJens Wiklander assert(pos < thread_core_count); 275ca825890SJens Wiklander return &thread_core_local[pos]; 276ca825890SJens Wiklander } 277ca825890SJens Wiklander 278ca825890SJens Wiklander struct thread_core_local * __nostackcheck thread_get_core_local(void) 279ca825890SJens Wiklander { 280ca825890SJens Wiklander unsigned int pos = get_core_pos(); 281ca825890SJens Wiklander 282ca825890SJens Wiklander return get_core_local(pos); 283ca825890SJens Wiklander } 284ca825890SJens Wiklander 285ca825890SJens Wiklander #ifdef CFG_CORE_DEBUG_CHECK_STACKS 286ca825890SJens Wiklander static void print_stack_limits(void) 287ca825890SJens Wiklander { 288ca825890SJens Wiklander size_t n = 0; 289ca825890SJens Wiklander vaddr_t __maybe_unused start = 0; 290ca825890SJens Wiklander vaddr_t __maybe_unused end = 0; 29105994c76SJens Wiklander vaddr_t va = 0; 292ca825890SJens Wiklander 29359724f22SJens Wiklander for (n = 0; n < thread_core_count; n++) { 29405994c76SJens Wiklander va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS; 29505994c76SJens Wiklander start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va); 29605994c76SJens Wiklander end = stack_end_va_to_bottom(STACK_TMP_SIZE, va); 297ca825890SJens Wiklander DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); 29805994c76SJens Wiklander 29905994c76SJens Wiklander va = thread_core_local[n].abt_stack_va_end; 30005994c76SJens Wiklander start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va); 30105994c76SJens Wiklander end = stack_end_va_to_bottom(STACK_ABT_SIZE, va); 302ca825890SJens Wiklander DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); 303ca825890SJens Wiklander } 30405994c76SJens Wiklander 305*aa0620cfSJens Wiklander for (n = 0; n < thread_count; n++) { 30605994c76SJens Wiklander va = threads[n].stack_va_end; 30705994c76SJens Wiklander start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va); 30805994c76SJens Wiklander end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va); 309ca825890SJens Wiklander DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); 310ca825890SJens Wiklander } 311ca825890SJens Wiklander } 312ca825890SJens Wiklander 313ca825890SJens Wiklander static void check_stack_limits(void) 314ca825890SJens Wiklander { 315ca825890SJens Wiklander vaddr_t stack_start = 0; 316ca825890SJens Wiklander vaddr_t stack_end = 0; 317ca825890SJens Wiklander /* Any value in the current stack frame will do */ 318ca825890SJens Wiklander vaddr_t current_sp = (vaddr_t)&stack_start; 319ca825890SJens Wiklander 320ca825890SJens Wiklander if (!get_stack_soft_limits(&stack_start, &stack_end)) 321ca825890SJens Wiklander panic("Unknown stack limits"); 322ca825890SJens Wiklander if (current_sp < stack_start || current_sp > stack_end) { 32328d6e35aSJerome Forissier EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%" 32428d6e35aSJerome Forissier PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start, 32528d6e35aSJerome Forissier stack_end); 326ca825890SJens Wiklander print_stack_limits(); 327ca825890SJens Wiklander panic(); 328ca825890SJens Wiklander } 329ca825890SJens Wiklander } 330ca825890SJens Wiklander 331ca825890SJens Wiklander static bool * __nostackcheck get_stackcheck_recursion_flag(void) 332ca825890SJens Wiklander { 333ca825890SJens Wiklander uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 334ca825890SJens Wiklander unsigned int pos = get_core_pos(); 335ca825890SJens Wiklander struct thread_core_local *l = get_core_local(pos); 336ca825890SJens Wiklander int ct = l->curr_thread; 337ca825890SJens Wiklander bool *p = NULL; 338ca825890SJens Wiklander 339ca825890SJens Wiklander if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP)) 340ca825890SJens Wiklander p = &l->stackcheck_recursion; 341ca825890SJens Wiklander else if (!l->flags) 342ca825890SJens Wiklander p = &threads[ct].tsd.stackcheck_recursion; 343ca825890SJens Wiklander 344ca825890SJens Wiklander thread_unmask_exceptions(exceptions); 345ca825890SJens Wiklander return p; 346ca825890SJens Wiklander } 347ca825890SJens Wiklander 348ca825890SJens Wiklander void __cyg_profile_func_enter(void *this_fn, void *call_site); 349ca825890SJens Wiklander void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused, 350ca825890SJens Wiklander void *call_site __unused) 351ca825890SJens Wiklander { 352ca825890SJens Wiklander bool *p = get_stackcheck_recursion_flag(); 353ca825890SJens Wiklander 354ca825890SJens Wiklander assert(p); 355ca825890SJens Wiklander if (*p) 356ca825890SJens Wiklander return; 357ca825890SJens Wiklander *p = true; 358ca825890SJens Wiklander check_stack_limits(); 359ca825890SJens Wiklander *p = false; 360ca825890SJens Wiklander } 361ca825890SJens Wiklander 362ca825890SJens Wiklander void __cyg_profile_func_exit(void *this_fn, void *call_site); 363ca825890SJens Wiklander void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused, 364ca825890SJens Wiklander void *call_site __unused) 365ca825890SJens Wiklander { 366ca825890SJens Wiklander } 367ca825890SJens Wiklander #else 368ca825890SJens Wiklander static void print_stack_limits(void) 369ca825890SJens Wiklander { 370ca825890SJens Wiklander } 371ca825890SJens Wiklander #endif 372ca825890SJens Wiklander 373ca825890SJens Wiklander void thread_init_boot_thread(void) 374ca825890SJens Wiklander { 375ca825890SJens Wiklander struct thread_core_local *l = thread_get_core_local(); 376ca825890SJens Wiklander 377ca825890SJens Wiklander l->curr_thread = 0; 378ca825890SJens Wiklander threads[0].state = THREAD_STATE_ACTIVE; 379ca825890SJens Wiklander } 380ca825890SJens Wiklander 381ca825890SJens Wiklander void __nostackcheck thread_clr_boot_thread(void) 382ca825890SJens Wiklander { 383ca825890SJens Wiklander struct thread_core_local *l = thread_get_core_local(); 384ca825890SJens Wiklander 385ca825890SJens Wiklander assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS); 386ca825890SJens Wiklander assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE); 387ca825890SJens Wiklander threads[l->curr_thread].state = THREAD_STATE_FREE; 388ca825890SJens Wiklander l->curr_thread = THREAD_ID_INVALID; 38959724f22SJens Wiklander print_stack_limits(); 390ca825890SJens Wiklander } 391ca825890SJens Wiklander 392ca825890SJens Wiklander void __nostackcheck *thread_get_tmp_sp(void) 393ca825890SJens Wiklander { 394ca825890SJens Wiklander struct thread_core_local *l = thread_get_core_local(); 395ca825890SJens Wiklander 396ca825890SJens Wiklander /* 397ca825890SJens Wiklander * Called from assembly when switching to the temporary stack, so flags 398ca825890SJens Wiklander * need updating 399ca825890SJens Wiklander */ 400ca825890SJens Wiklander l->flags |= THREAD_CLF_TMP; 401ca825890SJens Wiklander 402ca825890SJens Wiklander return (void *)l->tmp_stack_va_end; 403ca825890SJens Wiklander } 404ca825890SJens Wiklander 405ca825890SJens Wiklander vaddr_t thread_stack_start(void) 406ca825890SJens Wiklander { 407ca825890SJens Wiklander struct thread_ctx *thr; 408ca825890SJens Wiklander int ct = thread_get_id_may_fail(); 409ca825890SJens Wiklander 410ca825890SJens Wiklander if (ct == THREAD_ID_INVALID) 411ca825890SJens Wiklander return 0; 412ca825890SJens Wiklander 413ca825890SJens Wiklander thr = threads + ct; 41405994c76SJens Wiklander return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end); 415ca825890SJens Wiklander } 416ca825890SJens Wiklander 417ca825890SJens Wiklander size_t thread_stack_size(void) 418ca825890SJens Wiklander { 419ca825890SJens Wiklander return STACK_THREAD_SIZE; 420ca825890SJens Wiklander } 421ca825890SJens Wiklander 422ca825890SJens Wiklander bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard) 423ca825890SJens Wiklander { 424ca825890SJens Wiklander uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 425ca825890SJens Wiklander unsigned int pos = get_core_pos(); 426ca825890SJens Wiklander struct thread_core_local *l = get_core_local(pos); 427ca825890SJens Wiklander int ct = l->curr_thread; 42805994c76SJens Wiklander size_t stack_size = 0; 42905994c76SJens Wiklander bool ret = true; 43005994c76SJens Wiklander vaddr_t va = 0; 431ca825890SJens Wiklander 432ca825890SJens Wiklander if (l->flags & THREAD_CLF_TMP) { 43305994c76SJens Wiklander va = l->tmp_stack_va_end + STACK_TMP_OFFS; 43405994c76SJens Wiklander stack_size = STACK_TMP_SIZE; 435ca825890SJens Wiklander } else if (l->flags & THREAD_CLF_ABORT) { 43605994c76SJens Wiklander va = l->abt_stack_va_end; 43705994c76SJens Wiklander stack_size = STACK_ABT_SIZE; 438*aa0620cfSJens Wiklander } else if (!l->flags && ct >= 0 && (size_t)ct < thread_count) { 43905994c76SJens Wiklander va = threads[ct].stack_va_end; 44005994c76SJens Wiklander stack_size = STACK_THREAD_SIZE; 44105994c76SJens Wiklander } else { 44205994c76SJens Wiklander ret = false; 443ca825890SJens Wiklander goto out; 444ca825890SJens Wiklander } 44505994c76SJens Wiklander 44605994c76SJens Wiklander *end = stack_end_va_to_bottom(stack_size, va); 44705994c76SJens Wiklander if (hard) 44805994c76SJens Wiklander *start = stack_end_va_to_top_hard(stack_size, va); 44905994c76SJens Wiklander else 45005994c76SJens Wiklander *start = stack_end_va_to_top_soft(stack_size, va); 451ca825890SJens Wiklander out: 452ca825890SJens Wiklander thread_unmask_exceptions(exceptions); 453ca825890SJens Wiklander return ret; 454ca825890SJens Wiklander } 455ca825890SJens Wiklander 456ca825890SJens Wiklander bool thread_is_from_abort_mode(void) 457ca825890SJens Wiklander { 458ca825890SJens Wiklander struct thread_core_local *l = thread_get_core_local(); 459ca825890SJens Wiklander 460ca825890SJens Wiklander return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT; 461ca825890SJens Wiklander } 462ca825890SJens Wiklander 463ca825890SJens Wiklander /* 464ca825890SJens Wiklander * This function should always be accurate, but it might be possible to 465ca825890SJens Wiklander * implement a more efficient depending on cpu architecture. 466ca825890SJens Wiklander */ 46745c754ceSJens Wiklander bool __weak __noprof thread_is_in_normal_mode(void) 468ca825890SJens Wiklander { 469ca825890SJens Wiklander uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 470ca825890SJens Wiklander struct thread_core_local *l = thread_get_core_local(); 471ca825890SJens Wiklander bool ret; 472ca825890SJens Wiklander 473ca825890SJens Wiklander /* 474ca825890SJens Wiklander * If any bit in l->flags is set aside from THREAD_CLF_TMP we're 475ca825890SJens Wiklander * handling some exception. 476ca825890SJens Wiklander */ 477ca825890SJens Wiklander ret = (l->curr_thread != THREAD_ID_INVALID) && 478ca825890SJens Wiklander !(l->flags & ~THREAD_CLF_TMP); 479ca825890SJens Wiklander thread_unmask_exceptions(exceptions); 480ca825890SJens Wiklander 481ca825890SJens Wiklander return ret; 482ca825890SJens Wiklander } 483ca825890SJens Wiklander 484239420cbSJerome Forissier short int __noprof thread_get_id_may_fail(void) 485ca825890SJens Wiklander { 486ca825890SJens Wiklander /* 487ca825890SJens Wiklander * thread_get_core_local() requires foreign interrupts to be disabled 488ca825890SJens Wiklander */ 489ca825890SJens Wiklander uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 490ca825890SJens Wiklander struct thread_core_local *l = thread_get_core_local(); 491ca825890SJens Wiklander short int ct = l->curr_thread; 492ca825890SJens Wiklander 493ca825890SJens Wiklander thread_unmask_exceptions(exceptions); 494ca825890SJens Wiklander return ct; 495ca825890SJens Wiklander } 496ca825890SJens Wiklander 4978577287cSJerome Forissier short int __noprof thread_get_id(void) 498ca825890SJens Wiklander { 499ca825890SJens Wiklander short int ct = thread_get_id_may_fail(); 500ca825890SJens Wiklander 501ca825890SJens Wiklander /* Thread ID has to fit in a short int */ 502ca825890SJens Wiklander COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX); 503ca825890SJens Wiklander assert(ct >= 0 && ct < CFG_NUM_THREADS); 504ca825890SJens Wiklander return ct; 505ca825890SJens Wiklander } 506ca825890SJens Wiklander 507*aa0620cfSJens Wiklander static vaddr_t alloc_stack(size_t stack_size, bool nex) 508*aa0620cfSJens Wiklander { 509*aa0620cfSJens Wiklander size_t l = stack_size_to_alloc_size(stack_size); 510*aa0620cfSJens Wiklander size_t rl = ROUNDUP(l, SMALL_PAGE_SIZE); 511*aa0620cfSJens Wiklander uint32_t flags = MAF_GUARD_HEAD; 512*aa0620cfSJens Wiklander vaddr_t end_va = 0; 513*aa0620cfSJens Wiklander vaddr_t va = 0; 514*aa0620cfSJens Wiklander 515*aa0620cfSJens Wiklander if (nex) 516*aa0620cfSJens Wiklander flags |= MAF_NEX; 517*aa0620cfSJens Wiklander va = virt_page_alloc(rl / SMALL_PAGE_SIZE, flags); 518*aa0620cfSJens Wiklander if (!va) 519*aa0620cfSJens Wiklander panic(); 520*aa0620cfSJens Wiklander 521*aa0620cfSJens Wiklander end_va = va + l - STACK_CANARY_SIZE / 2; 522*aa0620cfSJens Wiklander if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) 523*aa0620cfSJens Wiklander init_canaries(stack_size, end_va); 524*aa0620cfSJens Wiklander 525*aa0620cfSJens Wiklander return end_va; 526*aa0620cfSJens Wiklander } 527*aa0620cfSJens Wiklander 528ca825890SJens Wiklander #ifdef CFG_WITH_PAGER 529ca825890SJens Wiklander static void init_thread_stacks(void) 530ca825890SJens Wiklander { 531ca825890SJens Wiklander size_t n = 0; 532ca825890SJens Wiklander 533ca825890SJens Wiklander /* 534ca825890SJens Wiklander * Allocate virtual memory for thread stacks. 535ca825890SJens Wiklander */ 536*aa0620cfSJens Wiklander for (n = 0; n < thread_count; n++) { 537ca825890SJens Wiklander tee_mm_entry_t *mm = NULL; 538ca825890SJens Wiklander vaddr_t sp = 0; 539ca825890SJens Wiklander size_t num_pages = 0; 540ca825890SJens Wiklander struct fobj *fobj = NULL; 541ca825890SJens Wiklander 542ca825890SJens Wiklander /* Find vmem for thread stack and its protection gap */ 5439b0ee59dSJens Wiklander mm = tee_mm_alloc(&core_virt_mem_pool, 544ca825890SJens Wiklander SMALL_PAGE_SIZE + STACK_THREAD_SIZE); 545ca825890SJens Wiklander assert(mm); 546ca825890SJens Wiklander 547ca825890SJens Wiklander /* Claim eventual physical page */ 548ca825890SJens Wiklander tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm), 549ca825890SJens Wiklander true); 550ca825890SJens Wiklander 551ca825890SJens Wiklander num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1; 552ca825890SJens Wiklander fobj = fobj_locked_paged_alloc(num_pages); 553ca825890SJens Wiklander 554ca825890SJens Wiklander /* Add the region to the pager */ 555ca825890SJens Wiklander tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE, 556ca825890SJens Wiklander PAGED_REGION_TYPE_LOCK, fobj); 557ca825890SJens Wiklander fobj_put(fobj); 558ca825890SJens Wiklander 559ca825890SJens Wiklander /* init effective stack */ 560ca825890SJens Wiklander sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm); 561ca825890SJens Wiklander asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp); 562980d32c4SJens Wiklander threads[n].stack_va_end = sp; 563ca825890SJens Wiklander } 564ca825890SJens Wiklander } 565ca825890SJens Wiklander #else 566ca825890SJens Wiklander static void init_thread_stacks(void) 567ca825890SJens Wiklander { 568ad94da2aSJens Wiklander vaddr_t va = 0; 569ad94da2aSJens Wiklander size_t n = 0; 570ca825890SJens Wiklander 571ca825890SJens Wiklander /* Assign the thread stacks */ 572*aa0620cfSJens Wiklander for (n = 0; n < thread_count; n++) { 573*aa0620cfSJens Wiklander if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) 574*aa0620cfSJens Wiklander va = alloc_stack(STACK_THREAD_SIZE, false); 575*aa0620cfSJens Wiklander else 57659724f22SJens Wiklander va = GET_STACK_THREAD_BOTTOM(n); 577ad94da2aSJens Wiklander threads[n].stack_va_end = va; 578ad94da2aSJens Wiklander if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) 579ad94da2aSJens Wiklander init_canaries(STACK_THREAD_SIZE, va); 580ad94da2aSJens Wiklander } 581ca825890SJens Wiklander } 582ca825890SJens Wiklander #endif /*CFG_WITH_PAGER*/ 583ca825890SJens Wiklander 584*aa0620cfSJens Wiklander void thread_init_threads(size_t count) 585ca825890SJens Wiklander { 586ca825890SJens Wiklander size_t n = 0; 587ca825890SJens Wiklander 588*aa0620cfSJens Wiklander if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) { 589*aa0620cfSJens Wiklander assert(count <= CFG_NUM_THREADS); 590*aa0620cfSJens Wiklander threads = calloc(count, sizeof(*threads)); 591*aa0620cfSJens Wiklander if (!threads) 592*aa0620cfSJens Wiklander panic(); 593*aa0620cfSJens Wiklander thread_count = count; 594*aa0620cfSJens Wiklander } else { 59591d4649dSJens Wiklander assert(count == CFG_NUM_THREADS); 596*aa0620cfSJens Wiklander } 597*aa0620cfSJens Wiklander 598ca825890SJens Wiklander init_thread_stacks(); 599ca825890SJens Wiklander print_stack_limits(); 600ca825890SJens Wiklander pgt_init(); 601ca825890SJens Wiklander 602ca825890SJens Wiklander mutex_lockdep_init(); 603ca825890SJens Wiklander 604*aa0620cfSJens Wiklander for (n = 0; n < thread_count; n++) 605ca825890SJens Wiklander TAILQ_INIT(&threads[n].tsd.sess_stack); 606ca825890SJens Wiklander } 607ca825890SJens Wiklander 60859724f22SJens Wiklander #ifndef CFG_DYN_STACK_CONFIG 609b5ec8152SJens Wiklander vaddr_t __nostackcheck thread_get_abt_stack(void) 610b5ec8152SJens Wiklander { 611b5ec8152SJens Wiklander return GET_STACK_BOTTOM(stack_abt, get_core_pos()); 612b5ec8152SJens Wiklander } 61359724f22SJens Wiklander #endif 614b5ec8152SJens Wiklander 615758c3687SJens Wiklander #ifdef CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL 61659724f22SJens Wiklander void thread_init_thread_core_local(size_t core_count) 61759724f22SJens Wiklander { 61859724f22SJens Wiklander struct thread_core_local *tcl = NULL; 619758c3687SJens Wiklander const size_t core_pos = get_core_pos(); 620ad94da2aSJens Wiklander vaddr_t va = 0; 621ad94da2aSJens Wiklander size_t n = 0; 622b5ec8152SJens Wiklander 62359724f22SJens Wiklander if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) { 62459724f22SJens Wiklander assert(core_count <= CFG_TEE_CORE_NB_CORE); 62559724f22SJens Wiklander tcl = nex_calloc(core_count, sizeof(*tcl)); 62659724f22SJens Wiklander if (!tcl) 62759724f22SJens Wiklander panic(); 62859724f22SJens Wiklander __thread_core_local_new = tcl; 62959724f22SJens Wiklander __thread_core_count_new = core_count; 63059724f22SJens Wiklander } else { 63159724f22SJens Wiklander tcl = thread_core_local; 632a4c2e0cbSJens Wiklander assert(core_count == CFG_TEE_CORE_NB_CORE); 63359724f22SJens Wiklander 63459724f22SJens Wiklander for (n = 0; n < thread_core_count; n++) { 63559724f22SJens Wiklander init_canaries(STACK_TMP_SIZE, 63659724f22SJens Wiklander GET_STACK_BOTTOM(stack_tmp, n)); 63759724f22SJens Wiklander init_canaries(STACK_ABT_SIZE, 63859724f22SJens Wiklander GET_STACK_BOTTOM(stack_abt, n)); 63959724f22SJens Wiklander } 64059724f22SJens Wiklander } 64159724f22SJens Wiklander 64259724f22SJens Wiklander for (n = 0; n < core_count; n++) { 64359724f22SJens Wiklander if (n == core_pos) { 64459724f22SJens Wiklander if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) 64559724f22SJens Wiklander tcl[n] = thread_core_local[0]; 64659724f22SJens Wiklander else 64759724f22SJens Wiklander continue; 64859724f22SJens Wiklander } else { 649b5ec8152SJens Wiklander tcl[n].curr_thread = THREAD_ID_INVALID; 650b5ec8152SJens Wiklander tcl[n].flags = THREAD_CLF_TMP; 65159724f22SJens Wiklander } 652ad94da2aSJens Wiklander 65359724f22SJens Wiklander if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) 65459724f22SJens Wiklander va = alloc_stack(STACK_TMP_SIZE, true); 65559724f22SJens Wiklander else 656ad94da2aSJens Wiklander va = GET_STACK_BOTTOM(stack_tmp, n); 657ad94da2aSJens Wiklander tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS; 65859724f22SJens Wiklander #ifdef ARM32 65959724f22SJens Wiklander tcl[n].tmp_stack_pa_end = 66059724f22SJens Wiklander vaddr_to_phys(tcl[n].tmp_stack_va_end); 66159724f22SJens Wiklander #endif 66259724f22SJens Wiklander 66359724f22SJens Wiklander if (IS_ENABLED(CFG_DYN_STACK_CONFIG)) 66459724f22SJens Wiklander va = alloc_stack(STACK_ABT_SIZE, true); 66559724f22SJens Wiklander else 666ad94da2aSJens Wiklander va = GET_STACK_BOTTOM(stack_abt, n); 667ad94da2aSJens Wiklander tcl[n].abt_stack_va_end = va; 668b5ec8152SJens Wiklander } 669b5ec8152SJens Wiklander } 670b5ec8152SJens Wiklander #else 67159724f22SJens Wiklander void __nostackcheck 67259724f22SJens Wiklander thread_init_thread_core_local(size_t core_count __maybe_unused) 673ca825890SJens Wiklander { 674ca825890SJens Wiklander size_t n = 0; 675ca825890SJens Wiklander struct thread_core_local *tcl = thread_core_local; 676ca825890SJens Wiklander 677a4c2e0cbSJens Wiklander assert(core_count == CFG_TEE_CORE_NB_CORE); 678ca825890SJens Wiklander for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { 679ca825890SJens Wiklander tcl[n].curr_thread = THREAD_ID_INVALID; 680ca825890SJens Wiklander tcl[n].flags = THREAD_CLF_TMP; 681ca825890SJens Wiklander } 682ca825890SJens Wiklander tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0); 683ca825890SJens Wiklander } 684ca825890SJens Wiklander 685a7a0664eSJerome Forissier void __nostackcheck thread_init_core_local_stacks(void) 686ca825890SJens Wiklander { 687ca825890SJens Wiklander size_t n = 0; 688ca825890SJens Wiklander struct thread_core_local *tcl = thread_core_local; 689ca825890SJens Wiklander 690ca825890SJens Wiklander for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) { 691ca825890SJens Wiklander tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) - 692ca825890SJens Wiklander STACK_TMP_OFFS; 693ca825890SJens Wiklander tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n); 694ca825890SJens Wiklander } 695ca825890SJens Wiklander } 696758c3687SJens Wiklander #endif /*CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL*/ 697ca825890SJens Wiklander 69893dc6b29SJens Wiklander #if defined(CFG_CORE_PAUTH) 69993dc6b29SJens Wiklander void thread_init_thread_pauth_keys(void) 70093dc6b29SJens Wiklander { 70193dc6b29SJens Wiklander size_t n = 0; 70293dc6b29SJens Wiklander 703*aa0620cfSJens Wiklander for (n = 0; n < thread_count; n++) 70493dc6b29SJens Wiklander if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys))) 70593dc6b29SJens Wiklander panic("Failed to init thread pauth keys"); 70693dc6b29SJens Wiklander } 70793dc6b29SJens Wiklander 70893dc6b29SJens Wiklander void thread_init_core_local_pauth_keys(void) 70993dc6b29SJens Wiklander { 71093dc6b29SJens Wiklander struct thread_core_local *tcl = thread_core_local; 71193dc6b29SJens Wiklander size_t n = 0; 71293dc6b29SJens Wiklander 71359724f22SJens Wiklander for (n = 0; n < thread_core_count; n++) 71493dc6b29SJens Wiklander if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys))) 71593dc6b29SJens Wiklander panic("Failed to init core local pauth keys"); 71693dc6b29SJens Wiklander } 71793dc6b29SJens Wiklander #endif 71893dc6b29SJens Wiklander 7198577287cSJerome Forissier struct thread_specific_data * __noprof thread_get_tsd(void) 720ca825890SJens Wiklander { 721ca825890SJens Wiklander return &threads[thread_get_id()].tsd; 722ca825890SJens Wiklander } 723ca825890SJens Wiklander 724ca825890SJens Wiklander struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void) 725ca825890SJens Wiklander { 726ca825890SJens Wiklander struct thread_core_local *l = thread_get_core_local(); 727ca825890SJens Wiklander 728ca825890SJens Wiklander assert(l->curr_thread != THREAD_ID_INVALID); 729ca825890SJens Wiklander return &threads[l->curr_thread].regs; 730ca825890SJens Wiklander } 731ca825890SJens Wiklander 732ca825890SJens Wiklander void thread_set_foreign_intr(bool enable) 733ca825890SJens Wiklander { 734ca825890SJens Wiklander /* thread_get_core_local() requires foreign interrupts to be disabled */ 735ca825890SJens Wiklander uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 736ca825890SJens Wiklander struct thread_core_local *l; 737ca825890SJens Wiklander 738ca825890SJens Wiklander l = thread_get_core_local(); 739ca825890SJens Wiklander 740ca825890SJens Wiklander assert(l->curr_thread != THREAD_ID_INVALID); 741ca825890SJens Wiklander 742ca825890SJens Wiklander if (enable) { 743ca825890SJens Wiklander threads[l->curr_thread].flags |= 744ca825890SJens Wiklander THREAD_FLAGS_FOREIGN_INTR_ENABLE; 745ca825890SJens Wiklander thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 746ca825890SJens Wiklander } else { 747ca825890SJens Wiklander /* 748ca825890SJens Wiklander * No need to disable foreign interrupts here since they're 749ca825890SJens Wiklander * already disabled above. 750ca825890SJens Wiklander */ 751ca825890SJens Wiklander threads[l->curr_thread].flags &= 752ca825890SJens Wiklander ~THREAD_FLAGS_FOREIGN_INTR_ENABLE; 753ca825890SJens Wiklander } 754ca825890SJens Wiklander } 755ca825890SJens Wiklander 756ca825890SJens Wiklander void thread_restore_foreign_intr(void) 757ca825890SJens Wiklander { 758ca825890SJens Wiklander /* thread_get_core_local() requires foreign interrupts to be disabled */ 759ca825890SJens Wiklander uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 760ca825890SJens Wiklander struct thread_core_local *l; 761ca825890SJens Wiklander 762ca825890SJens Wiklander l = thread_get_core_local(); 763ca825890SJens Wiklander 764ca825890SJens Wiklander assert(l->curr_thread != THREAD_ID_INVALID); 765ca825890SJens Wiklander 766ca825890SJens Wiklander if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE) 767ca825890SJens Wiklander thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR); 768ca825890SJens Wiklander } 769ca825890SJens Wiklander 770ca825890SJens Wiklander static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size) 771ca825890SJens Wiklander { 772ca825890SJens Wiklander switch (shm_type) { 773ca825890SJens Wiklander case THREAD_SHM_TYPE_APPLICATION: 774ca825890SJens Wiklander return thread_rpc_alloc_payload(size); 775ca825890SJens Wiklander case THREAD_SHM_TYPE_KERNEL_PRIVATE: 776ca825890SJens Wiklander return thread_rpc_alloc_kernel_payload(size); 777ca825890SJens Wiklander case THREAD_SHM_TYPE_GLOBAL: 778ca825890SJens Wiklander return thread_rpc_alloc_global_payload(size); 779ca825890SJens Wiklander default: 780ca825890SJens Wiklander return NULL; 781ca825890SJens Wiklander } 782ca825890SJens Wiklander } 783ca825890SJens Wiklander 784ca825890SJens Wiklander static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce) 785ca825890SJens Wiklander { 786ca825890SJens Wiklander if (ce->mobj) { 787ca825890SJens Wiklander switch (ce->type) { 788ca825890SJens Wiklander case THREAD_SHM_TYPE_APPLICATION: 789ca825890SJens Wiklander thread_rpc_free_payload(ce->mobj); 790ca825890SJens Wiklander break; 791ca825890SJens Wiklander case THREAD_SHM_TYPE_KERNEL_PRIVATE: 792ca825890SJens Wiklander thread_rpc_free_kernel_payload(ce->mobj); 793ca825890SJens Wiklander break; 794ca825890SJens Wiklander case THREAD_SHM_TYPE_GLOBAL: 795ca825890SJens Wiklander thread_rpc_free_global_payload(ce->mobj); 796ca825890SJens Wiklander break; 797ca825890SJens Wiklander default: 798ca825890SJens Wiklander assert(0); /* "can't happen" */ 799ca825890SJens Wiklander break; 800ca825890SJens Wiklander } 801ca825890SJens Wiklander } 802ca825890SJens Wiklander ce->mobj = NULL; 803ca825890SJens Wiklander ce->size = 0; 804ca825890SJens Wiklander } 805ca825890SJens Wiklander 806ca825890SJens Wiklander static struct thread_shm_cache_entry * 807ca825890SJens Wiklander get_shm_cache_entry(enum thread_shm_cache_user user) 808ca825890SJens Wiklander { 809ca825890SJens Wiklander struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache; 810ca825890SJens Wiklander struct thread_shm_cache_entry *ce = NULL; 811ca825890SJens Wiklander 812ca825890SJens Wiklander SLIST_FOREACH(ce, cache, link) 813ca825890SJens Wiklander if (ce->user == user) 814ca825890SJens Wiklander return ce; 815ca825890SJens Wiklander 816ca825890SJens Wiklander ce = calloc(1, sizeof(*ce)); 817ca825890SJens Wiklander if (ce) { 818ca825890SJens Wiklander ce->user = user; 819ca825890SJens Wiklander SLIST_INSERT_HEAD(cache, ce, link); 820ca825890SJens Wiklander } 821ca825890SJens Wiklander 822ca825890SJens Wiklander return ce; 823ca825890SJens Wiklander } 824ca825890SJens Wiklander 825ca825890SJens Wiklander void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user, 826ca825890SJens Wiklander enum thread_shm_type shm_type, 827ca825890SJens Wiklander size_t size, struct mobj **mobj) 828ca825890SJens Wiklander { 829ca825890SJens Wiklander struct thread_shm_cache_entry *ce = NULL; 830ca825890SJens Wiklander size_t sz = size; 831ca825890SJens Wiklander paddr_t p = 0; 832ca825890SJens Wiklander void *va = NULL; 833ca825890SJens Wiklander 834ca825890SJens Wiklander if (!size) 835ca825890SJens Wiklander return NULL; 836ca825890SJens Wiklander 837ca825890SJens Wiklander ce = get_shm_cache_entry(user); 838ca825890SJens Wiklander if (!ce) 839ca825890SJens Wiklander return NULL; 840ca825890SJens Wiklander 841ca825890SJens Wiklander /* 842ca825890SJens Wiklander * Always allocate in page chunks as normal world allocates payload 843ca825890SJens Wiklander * memory as complete pages. 844ca825890SJens Wiklander */ 845ca825890SJens Wiklander sz = ROUNDUP(size, SMALL_PAGE_SIZE); 846ca825890SJens Wiklander 847ca825890SJens Wiklander if (ce->type != shm_type || sz > ce->size) { 848ca825890SJens Wiklander clear_shm_cache_entry(ce); 849ca825890SJens Wiklander 850ca825890SJens Wiklander ce->mobj = alloc_shm(shm_type, sz); 851ca825890SJens Wiklander if (!ce->mobj) 852ca825890SJens Wiklander return NULL; 853ca825890SJens Wiklander 854ca825890SJens Wiklander if (mobj_get_pa(ce->mobj, 0, 0, &p)) 855ca825890SJens Wiklander goto err; 856ca825890SJens Wiklander 857ca825890SJens Wiklander if (!IS_ALIGNED_WITH_TYPE(p, uint64_t)) 858ca825890SJens Wiklander goto err; 859ca825890SJens Wiklander 860ca825890SJens Wiklander va = mobj_get_va(ce->mobj, 0, sz); 861ca825890SJens Wiklander if (!va) 862ca825890SJens Wiklander goto err; 863ca825890SJens Wiklander 864ca825890SJens Wiklander ce->size = sz; 865ca825890SJens Wiklander ce->type = shm_type; 866ca825890SJens Wiklander } else { 867ca825890SJens Wiklander va = mobj_get_va(ce->mobj, 0, sz); 868ca825890SJens Wiklander if (!va) 869ca825890SJens Wiklander goto err; 870ca825890SJens Wiklander } 871ca825890SJens Wiklander *mobj = ce->mobj; 872ca825890SJens Wiklander 873ca825890SJens Wiklander return va; 874ca825890SJens Wiklander err: 875ca825890SJens Wiklander clear_shm_cache_entry(ce); 876ca825890SJens Wiklander return NULL; 877ca825890SJens Wiklander } 878ca825890SJens Wiklander 879ca825890SJens Wiklander void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache) 880ca825890SJens Wiklander { 881ca825890SJens Wiklander while (true) { 882ca825890SJens Wiklander struct thread_shm_cache_entry *ce = SLIST_FIRST(cache); 883ca825890SJens Wiklander 884ca825890SJens Wiklander if (!ce) 885ca825890SJens Wiklander break; 886ca825890SJens Wiklander SLIST_REMOVE_HEAD(cache, link); 887ca825890SJens Wiklander clear_shm_cache_entry(ce); 888ca825890SJens Wiklander free(ce); 889ca825890SJens Wiklander } 890ca825890SJens Wiklander } 891