// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <kernel/asan.h>
#include <kernel/boot.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;

/*
 * Stacks
 *
 * [Lower addresses on the left]
 *
 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
 * ^                     ^                   ^                ^
 * stack_xxx[n]          "hard" top          "soft" top       bottom
 */

static uint32_t start_canary_value = 0xdedede00;
static uint32_t end_canary_value = 0xababab00;

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE,
	      /* global linkage */);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif

#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
				    STACK_CANARY_SIZE / 2)
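
/*
 * Illustrative example (values assumed, not taken from any particular
 * platform): with STACK_CANARY_SIZE = 8, STACK_CHECK_EXTRA = 512,
 * STACK_XXX_SIZE = 2048 and STACK_ALIGNMENT = 8, DECLARE_STACK()
 * reserves ROUNDUP(2048 + 8 + 512, 8) = 0xa08 bytes per stack. If
 * stack_xxx[n] starts at VA 0x1000 then
 *	GET_STACK_BOTTOM(stack_xxx, n) = 0x1000 + 0xa08 - 8 / 2 = 0x1a04
 * which leaves half of the canary bytes past the bottom and the other
 * half below the "hard" top, matching the diagram above (see also the
 * stack_end_va_to_*() helpers below).
 */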

const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the mmu). Hence kept in pager sections.
 */
DECLARE_KEEP_PAGER(stack_tmp_stride);

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

static size_t stack_size_to_alloc_size(size_t stack_size)
{
	return ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA,
		       STACK_ALIGNMENT);
}

static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va)
{
	size_t l = stack_size_to_alloc_size(stack_size);

	return end_va - l + STACK_CANARY_SIZE;
}

static vaddr_t stack_end_va_to_top_soft(size_t stack_size, vaddr_t end_va)
{
	return stack_end_va_to_top_hard(stack_size, end_va) + STACK_CHECK_EXTRA;
}

static vaddr_t stack_end_va_to_bottom(size_t stack_size __unused,
				      vaddr_t end_va)
{
	return end_va;
}

static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va)
{
	return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) -
			    STACK_CANARY_SIZE / 2);
}

static uint32_t *stack_end_va_to_end_canary(size_t stack_size __unused,
					    vaddr_t end_va)
{
	return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t));
}

static void init_canaries(size_t stack_size, vaddr_t va_end)
{
	uint32_t *canary = NULL;

	assert(va_end);
	canary = stack_end_va_to_start_canary(stack_size, va_end);
	*canary = start_canary_value;
	canary = stack_end_va_to_end_canary(stack_size, va_end);
	*canary = end_canary_value;
}

void thread_init_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				init_canaries(STACK_TMP_SIZE, va);
			}
			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				init_canaries(STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			va = threads[n].stack_va_end;
			if (va)
				init_canaries(STACK_THREAD_SIZE, va);
		}
	}
}
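
/*
 * Note on the re-keying below: thread_update_canaries() fetches fresh
 * random canary words first, then masks all exceptions on the calling
 * core, verifies that the old canaries are still intact, swaps in the
 * new values and rewrites every known stack via thread_init_canaries().
 * Masking exceptions keeps this core from running other code that could
 * check a canary while the values are mid-update.
 */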
#if defined(CFG_WITH_STACK_CANARIES)
void thread_update_canaries(void)
{
	uint32_t canary[2] = { };
	uint32_t exceptions = 0;

	plat_get_random_stack_canaries(canary, ARRAY_SIZE(canary),
				       sizeof(canary[0]));

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	thread_check_canaries();

	start_canary_value = canary[0];
	end_canary_value = canary[1];
	thread_init_canaries();

	thread_unmask_exceptions(exceptions);
}
#endif

static void check_stack_canary(const char *stack_name __maybe_unused,
			       size_t n __maybe_unused,
			       size_t stack_size, vaddr_t end_va)
{
	uint32_t *canary = NULL;

	canary = stack_end_va_to_start_canary(stack_size, end_va);
	if (*canary != start_canary_value) {
		EMSG_RAW("Dead canary at start of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}

	canary = stack_end_va_to_end_canary(stack_size, end_va);
	if (*canary != end_canary_value) {
		EMSG_RAW("Dead canary at end of '%s[%zu]' (%p)",
			 stack_name, n, (void *)canary);
		panic();
	}
}

void thread_check_canaries(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES)) {
		for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
			if (thread_core_local[n].tmp_stack_va_end) {
				va = thread_core_local[n].tmp_stack_va_end +
				     STACK_TMP_OFFS;
				check_stack_canary("tmp_stack", n,
						   STACK_TMP_SIZE, va);
			}

			va = thread_core_local[n].abt_stack_va_end;
			if (va)
				check_stack_canary("abt_stack", n,
						   STACK_ABT_SIZE, va);
		}
	}

	if (IS_ENABLED(CFG_WITH_STACK_CANARIES) &&
	    !IS_ENABLED(CFG_WITH_PAGER) && !IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
		for (n = 0; n < CFG_NUM_THREADS; n++) {
			va = threads[n].stack_va_end;
			if (va)
				check_stack_canary("thread_stack", n,
						   STACK_THREAD_SIZE, va);
		}
	}
}

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

static struct thread_core_local * __nostackcheck
get_core_local(unsigned int pos)
{
	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(pos < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[pos];
}

struct thread_core_local * __nostackcheck thread_get_core_local(void)
{
	unsigned int pos = get_core_pos();

	return get_core_local(pos);
}

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
static void print_stack_limits(void)
{
	size_t n = 0;
	vaddr_t __maybe_unused start = 0;
	vaddr_t __maybe_unused end = 0;
	vaddr_t va = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS;
		start = stack_end_va_to_top_soft(STACK_TMP_SIZE, va);
		end = stack_end_va_to_bottom(STACK_TMP_SIZE, va);
		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);

		va = thread_core_local[n].abt_stack_va_end;
		start = stack_end_va_to_top_soft(STACK_ABT_SIZE, va);
		end = stack_end_va_to_bottom(STACK_ABT_SIZE, va);
		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		va = threads[n].stack_va_end;
		start = stack_end_va_to_top_soft(STACK_THREAD_SIZE, va);
		end = stack_end_va_to_bottom(STACK_THREAD_SIZE, va);
		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
}

static void check_stack_limits(void)
{
	vaddr_t stack_start = 0;
	vaddr_t stack_end = 0;
	/* Any value in the current stack frame will do */
	vaddr_t current_sp = (vaddr_t)&stack_start;

	if (!get_stack_soft_limits(&stack_start, &stack_end))
		panic("Unknown stack limits");
	if (current_sp < stack_start || current_sp > stack_end) {
		EMSG("Stack pointer out of range: 0x%" PRIxVA " not in [0x%"
		     PRIxVA " .. 0x%" PRIxVA "]", current_sp, stack_start,
		     stack_end);
		print_stack_limits();
		panic();
	}
}

static bool * __nostackcheck get_stackcheck_recursion_flag(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool *p = NULL;

	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
		p = &l->stackcheck_recursion;
	else if (!l->flags)
		p = &threads[ct].tsd.stackcheck_recursion;

	thread_unmask_exceptions(exceptions);
	return p;
}
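
/*
 * With CFG_CORE_DEBUG_CHECK_STACKS the core is assumed to be built with
 * -finstrument-functions, which makes GCC/Clang emit calls to
 * __cyg_profile_func_enter()/__cyg_profile_func_exit() around every
 * non-excluded function, so the hook below amounts to a stack-limit
 * check at each function entry. The recursion flag keeps the check
 * itself (whose callees are also instrumented) from recursing.
 */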
void __cyg_profile_func_enter(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
					     void *call_site __unused)
{
	bool *p = get_stackcheck_recursion_flag();

	assert(p);
	if (*p)
		return;
	*p = true;
	check_stack_limits();
	*p = false;
}

void __cyg_profile_func_exit(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
					    void *call_site __unused)
{
}
#else
static void print_stack_limits(void)
{
}
#endif

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	thread_init_threads();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void __nostackcheck thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = THREAD_ID_INVALID;
}

void __nostackcheck *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	/*
	 * Called from assembly when switching to the temporary stack, so
	 * flags need updating
	 */
	l->flags |= THREAD_CLF_TMP;

	return (void *)l->tmp_stack_va_end;
}

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == THREAD_ID_INVALID)
		return 0;

	thr = threads + ct;
	return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end);
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}
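
/*
 * Reports the limits of the stack the calling core is currently
 * expected to run on: the temporary stack if THREAD_CLF_TMP is set, the
 * abort stack if THREAD_CLF_ABORT is set, otherwise the current
 * thread's stack. *start is the "hard" top when @hard is true, else the
 * "soft" top. Returns false when no valid stack can be determined.
 */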
bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	size_t stack_size = 0;
	bool ret = true;
	vaddr_t va = 0;

	if (l->flags & THREAD_CLF_TMP) {
		va = l->tmp_stack_va_end + STACK_TMP_OFFS;
		stack_size = STACK_TMP_SIZE;
	} else if (l->flags & THREAD_CLF_ABORT) {
		va = l->abt_stack_va_end;
		stack_size = STACK_ABT_SIZE;
	} else if (!l->flags && ct >= 0 && ct < CFG_NUM_THREADS) {
		va = threads[ct].stack_va_end;
		stack_size = STACK_THREAD_SIZE;
	} else {
		ret = false;
		goto out;
	}

	*end = stack_end_va_to_bottom(stack_size, va);
	if (hard)
		*start = stack_end_va_to_top_hard(stack_size, va);
	else
		*start = stack_end_va_to_top_soft(stack_size, va);
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

/*
 * This function should always be accurate, but it might be possible to
 * implement a more efficient version depending on CPU architecture.
 */
bool __weak __noprof thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/*
	 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're
	 * handling some exception.
	 */
	ret = (l->curr_thread != THREAD_ID_INVALID) &&
	      !(l->flags & ~THREAD_CLF_TMP);
	thread_unmask_exceptions(exceptions);

	return ret;
}

short int __noprof thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	short int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

short int __noprof thread_get_id(void)
{
	short int ct = thread_get_id_may_fail();

	/* Thread ID has to fit in a short int */
	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}
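
/*
 * Two variants of init_thread_stacks() follow. With CFG_WITH_PAGER the
 * stacks are allocated from the core virtual memory pool with an extra
 * unmapped page below each stack acting as a protection gap, so an
 * overflow faults instead of silently corrupting a neighbouring stack.
 * Without the pager, threads simply use slots of the statically
 * allocated stack_thread[] array declared above.
 */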
#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&core_virt_mem_pool,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim any physical pages */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the region to the pager */
		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					  PAGED_REGION_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		threads[n].stack_va_end = sp;
	}
}
#else
static void init_thread_stacks(void)
{
	vaddr_t va = 0;
	size_t n = 0;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		va = GET_STACK_BOTTOM(stack_thread, n);
		threads[n].stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_THREAD_SIZE, va);
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_threads(void)
{
	size_t n = 0;

	init_thread_stacks();
	print_stack_limits();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++)
		TAILQ_INIT(&threads[n].tsd.sess_stack);
}

vaddr_t __nostackcheck thread_get_abt_stack(void)
{
	return GET_STACK_BOTTOM(stack_abt, get_core_pos());
}

#ifdef CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL
void thread_init_thread_core_local(void)
{
	struct thread_core_local *tcl = thread_core_local;
	const size_t core_pos = get_core_pos();
	vaddr_t va = 0;
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		if (n == core_pos)
			continue; /* Already initialized */
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;

		va = GET_STACK_BOTTOM(stack_tmp, n);
		tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_TMP_SIZE, va);
		va = GET_STACK_BOTTOM(stack_abt, n);
		tcl[n].abt_stack_va_end = va;
		if (IS_ENABLED(CFG_WITH_STACK_CANARIES))
			init_canaries(STACK_ABT_SIZE, va);
	}
}
#else
void __nostackcheck thread_init_thread_core_local(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;
	}
	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
}

void __nostackcheck thread_init_core_local_stacks(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
					  STACK_TMP_OFFS;
		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
	}
}
#endif /*CFG_BOOT_INIT_CURRENT_THREAD_CORE_LOCAL*/
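
/*
 * With CFG_CORE_PAUTH each thread and each core gets its own randomly
 * generated pointer authentication key set, read from the RNG at init
 * time; failure to obtain random data is treated as fatal.
 */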
#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void)
{
	size_t n = 0;

	for (n = 0; n < CFG_NUM_THREADS; n++)
		if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys)))
			panic("Failed to init thread pauth keys");
}

void thread_init_core_local_pauth_keys(void)
{
	struct thread_core_local *tcl = thread_core_local;
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys)))
			panic("Failed to init core local pauth keys");
}
#endif

struct thread_specific_data * __noprof thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);
	return &threads[l->curr_thread].regs;
}

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (enable) {
		threads[l->curr_thread].flags |=
			THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
			~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
{
	switch (shm_type) {
	case THREAD_SHM_TYPE_APPLICATION:
		return thread_rpc_alloc_payload(size);
	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
		return thread_rpc_alloc_kernel_payload(size);
	case THREAD_SHM_TYPE_GLOBAL:
		return thread_rpc_alloc_global_payload(size);
	default:
		return NULL;
	}
}

static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
{
	if (ce->mobj) {
		switch (ce->type) {
		case THREAD_SHM_TYPE_APPLICATION:
			thread_rpc_free_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
			thread_rpc_free_kernel_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_GLOBAL:
			thread_rpc_free_global_payload(ce->mobj);
			break;
		default:
			assert(0); /* "can't happen" */
			break;
		}
	}
	ce->mobj = NULL;
	ce->size = 0;
}

static struct thread_shm_cache_entry *
get_shm_cache_entry(enum thread_shm_cache_user user)
{
	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
	struct thread_shm_cache_entry *ce = NULL;

	SLIST_FOREACH(ce, cache, link)
		if (ce->user == user)
			return ce;

	ce = calloc(1, sizeof(*ce));
	if (ce) {
		ce->user = user;
		SLIST_INSERT_HEAD(cache, ce, link);
	}

	return ce;
}
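
/*
 * Per-thread cached allocation of RPC shared memory. Usage sketch
 * (illustrative only; the cache user below is just an example):
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_SOCKET,
 *					      THREAD_SHM_TYPE_APPLICATION,
 *					      len, &mobj);
 *
 * A later call with the same cache user reuses the cached buffer as
 * long as the requested type matches and the page-rounded size still
 * fits.
 */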
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj)
{
	struct thread_shm_cache_entry *ce = NULL;
	size_t sz = size;
	paddr_t p = 0;
	void *va = NULL;

	if (!size)
		return NULL;

	ce = get_shm_cache_entry(user);
	if (!ce)
		return NULL;

	/*
	 * Always allocate in page chunks as normal world allocates payload
	 * memory as complete pages.
	 */
	sz = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (ce->type != shm_type || sz > ce->size) {
		clear_shm_cache_entry(ce);

		ce->mobj = alloc_shm(shm_type, sz);
		if (!ce->mobj)
			return NULL;

		if (mobj_get_pa(ce->mobj, 0, 0, &p))
			goto err;

		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
			goto err;

		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;

		ce->size = sz;
		ce->type = shm_type;
	} else {
		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;
	}
	*mobj = ce->mobj;

	return va;
err:
	clear_shm_cache_entry(ce);
	return NULL;
}
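
/*
 * Releases every cached entry and its backing shared memory; presumably
 * called when the owning thread context is being torn down.
 */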
void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
{
	while (true) {
		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);

		if (!ce)
			break;
		SLIST_REMOVE_HEAD(cache, link);
		clear_shm_cache_entry(ce);
		free(ce);
	}
}