// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2022, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <config.h>
#include <crypto/crypto.h>
#include <kernel/asan.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;

/*
 * Stacks
 *
 * [Lower addresses on the left]
 *
 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
 * ^                     ^                   ^                ^
 * stack_xxx[n]          "hard" top          "soft" top       bottom
 */
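
/*
 * Worked example of the layout above (a sketch with illustrative numbers
 * only, not the real configuration): with STACK_CANARY_SIZE == 8,
 * STACK_CHECK_EXTRA == 512 and a 2048-byte stack, one stack_xxx[n] array
 * spans ROUNDUP(2048 + 8 + 512, STACK_ALIGNMENT) = 2568 bytes. The first
 * and last uint32_t of the array hold the canaries, and the stack pointer
 * starts at the "bottom" end, growing towards lower addresses.
 */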

#ifdef CFG_WITH_STACK_CANARIES
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#endif

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) ((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE, STACK_TMP_SIZE,
	      /* global linkage */);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS, STACK_THREAD_SIZE, static);
#endif
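
/*
 * For reference, DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE,
 * STACK_ABT_SIZE, static) above expands to roughly:
 *
 *	static uint32_t stack_abt[CFG_TEE_CORE_NB_CORE]
 *		[ROUNDUP(STACK_ABT_SIZE + STACK_CANARY_SIZE +
 *			 STACK_CHECK_EXTRA, STACK_ALIGNMENT) /
 *		 sizeof(uint32_t)]
 *		__attribute__((section(".nozi_stack.stack_abt"),
 *			       aligned(STACK_ALIGNMENT)));
 *
 * i.e. one canary- and check-padded stack per core, in its own
 * .nozi_stack.* section (presumably so the linker script can place the
 * stacks as a group).
 */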

#define GET_STACK_TOP_HARD(stack, n) \
	((vaddr_t)&(stack)[n] + STACK_CANARY_SIZE / 2)
#define GET_STACK_TOP_SOFT(stack, n) \
	(GET_STACK_TOP_HARD(stack, n) + STACK_CHECK_EXTRA)
#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
				    STACK_CANARY_SIZE / 2)

const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the mmu). Hence kept in pager sections.
 */
DECLARE_KEEP_PAGER(stack_tmp_stride);

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;

void thread_init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name)						\
	for (n = 0; n < ARRAY_SIZE(name); n++) {			\
		uint32_t *start_canary = &GET_START_CANARY(name, n);	\
		uint32_t *end_canary = &GET_END_CANARY(name, n);	\
									\
		*start_canary = START_CANARY_VALUE;			\
		*end_canary = END_CANARY_VALUE;				\
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n, addr) \
	do { \
		EMSG_RAW("Dead canary at %s of '%s[%zu]' (%p)", #loc, #stack, \
			 n, (void *)addr); \
		panic(); \
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	uint32_t *canary = NULL;
	size_t n = 0;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		canary = &GET_START_CANARY(stack_tmp, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n, canary);
		canary = &GET_END_CANARY(stack_tmp, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n, canary);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		canary = &GET_START_CANARY(stack_abt, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n, canary);
		canary = &GET_END_CANARY(stack_abt, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n, canary);
	}
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		canary = &GET_START_CANARY(stack_thread, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n, canary);
		canary = &GET_END_CANARY(stack_thread, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n, canary);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}
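
/*
 * The per-core data below must be accessed with foreign interrupts
 * masked, or the thread could migrate to another core halfway through.
 * The usual pattern in this file is (sketch):
 *
 *	uint32_t exceptions =
 *		thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
 *	struct thread_core_local *l = thread_get_core_local();
 *
 *	... use l ...
 *	thread_unmask_exceptions(exceptions);
 */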

static struct thread_core_local * __nostackcheck
get_core_local(unsigned int pos)
{
	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(pos < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[pos];
}

struct thread_core_local * __nostackcheck thread_get_core_local(void)
{
	unsigned int pos = get_core_pos();

	return get_core_local(pos);
}

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
static void print_stack_limits(void)
{
	size_t n = 0;
	vaddr_t __maybe_unused start = 0;
	vaddr_t __maybe_unused end = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		start = GET_STACK_TOP_SOFT(stack_tmp, n);
		end = GET_STACK_BOTTOM(stack_tmp, n);
		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		start = GET_STACK_TOP_SOFT(stack_abt, n);
		end = GET_STACK_BOTTOM(stack_abt, n);
		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		end = threads[n].stack_va_end;
		start = end - STACK_THREAD_SIZE + STACK_CHECK_EXTRA;
		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
}
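
/*
 * check_stack_limits() below compares the stack pointer against the
 * "soft" limits, which leave STACK_CHECK_EXTRA bytes of headroom above
 * the "hard" top (see the layout diagram at the top of this file),
 * presumably so that the reporting path below still has stack to run on
 * once the soft limit has been crossed.
 */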
0x%" PRIxVA "]", current_sp, stack_start, 20628d6e35aSJerome Forissier stack_end); 207ca825890SJens Wiklander print_stack_limits(); 208ca825890SJens Wiklander panic(); 209ca825890SJens Wiklander } 210ca825890SJens Wiklander } 211ca825890SJens Wiklander 212ca825890SJens Wiklander static bool * __nostackcheck get_stackcheck_recursion_flag(void) 213ca825890SJens Wiklander { 214ca825890SJens Wiklander uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR); 215ca825890SJens Wiklander unsigned int pos = get_core_pos(); 216ca825890SJens Wiklander struct thread_core_local *l = get_core_local(pos); 217ca825890SJens Wiklander int ct = l->curr_thread; 218ca825890SJens Wiklander bool *p = NULL; 219ca825890SJens Wiklander 220ca825890SJens Wiklander if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP)) 221ca825890SJens Wiklander p = &l->stackcheck_recursion; 222ca825890SJens Wiklander else if (!l->flags) 223ca825890SJens Wiklander p = &threads[ct].tsd.stackcheck_recursion; 224ca825890SJens Wiklander 225ca825890SJens Wiklander thread_unmask_exceptions(exceptions); 226ca825890SJens Wiklander return p; 227ca825890SJens Wiklander } 228ca825890SJens Wiklander 229ca825890SJens Wiklander void __cyg_profile_func_enter(void *this_fn, void *call_site); 230ca825890SJens Wiklander void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused, 231ca825890SJens Wiklander void *call_site __unused) 232ca825890SJens Wiklander { 233ca825890SJens Wiklander bool *p = get_stackcheck_recursion_flag(); 234ca825890SJens Wiklander 235ca825890SJens Wiklander assert(p); 236ca825890SJens Wiklander if (*p) 237ca825890SJens Wiklander return; 238ca825890SJens Wiklander *p = true; 239ca825890SJens Wiklander check_stack_limits(); 240ca825890SJens Wiklander *p = false; 241ca825890SJens Wiklander } 242ca825890SJens Wiklander 243ca825890SJens Wiklander void __cyg_profile_func_exit(void *this_fn, void *call_site); 244ca825890SJens Wiklander void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused, 245ca825890SJens Wiklander void *call_site __unused) 246ca825890SJens Wiklander { 247ca825890SJens Wiklander } 248ca825890SJens Wiklander #else 249ca825890SJens Wiklander static void print_stack_limits(void) 250ca825890SJens Wiklander { 251ca825890SJens Wiklander } 252ca825890SJens Wiklander #endif 253ca825890SJens Wiklander 254ca825890SJens Wiklander void thread_init_boot_thread(void) 255ca825890SJens Wiklander { 256ca825890SJens Wiklander struct thread_core_local *l = thread_get_core_local(); 257ca825890SJens Wiklander 258ca825890SJens Wiklander thread_init_threads(); 259ca825890SJens Wiklander 260ca825890SJens Wiklander l->curr_thread = 0; 261ca825890SJens Wiklander threads[0].state = THREAD_STATE_ACTIVE; 262ca825890SJens Wiklander } 263ca825890SJens Wiklander 264ca825890SJens Wiklander void __nostackcheck thread_clr_boot_thread(void) 265ca825890SJens Wiklander { 266ca825890SJens Wiklander struct thread_core_local *l = thread_get_core_local(); 267ca825890SJens Wiklander 268ca825890SJens Wiklander assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS); 269ca825890SJens Wiklander assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE); 270ca825890SJens Wiklander threads[l->curr_thread].state = THREAD_STATE_FREE; 271ca825890SJens Wiklander l->curr_thread = THREAD_ID_INVALID; 272ca825890SJens Wiklander } 273ca825890SJens Wiklander 274ca825890SJens Wiklander void __nostackcheck *thread_get_tmp_sp(void) 275ca825890SJens Wiklander { 276ca825890SJens Wiklander struct thread_core_local *l = 

void __cyg_profile_func_enter(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
					     void *call_site __unused)
{
	bool *p = get_stackcheck_recursion_flag();

	assert(p);
	if (*p)
		return;
	*p = true;
	check_stack_limits();
	*p = false;
}

void __cyg_profile_func_exit(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
					    void *call_site __unused)
{
}
#else
static void print_stack_limits(void)
{
}
#endif

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	thread_init_threads();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void __nostackcheck thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = THREAD_ID_INVALID;
}

void __nostackcheck *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	/*
	 * Called from assembly when switching to the temporary stack, so flags
	 * need updating
	 */
	l->flags |= THREAD_CLF_TMP;

	return (void *)l->tmp_stack_va_end;
}

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == THREAD_ID_INVALID)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}
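
/*
 * get_stack_limits() below reports the stack of the context the core is
 * currently executing in: the temporary stack, the abort stack or the
 * current thread's stack. A typical caller goes through
 * get_stack_soft_limits(), presumably a thin wrapper passing
 * hard == false (sketch):
 *
 *	vaddr_t start = 0;
 *	vaddr_t end = 0;
 *
 *	if (!get_stack_soft_limits(&start, &end))
 *		panic("Unknown stack limits");
 *
 * as in check_stack_limits() above.
 */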

bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool ret = false;

	if (l->flags & THREAD_CLF_TMP) {
		if (hard)
			*start = GET_STACK_TOP_HARD(stack_tmp, pos);
		else
			*start = GET_STACK_TOP_SOFT(stack_tmp, pos);
		*end = GET_STACK_BOTTOM(stack_tmp, pos);
		ret = true;
	} else if (l->flags & THREAD_CLF_ABORT) {
		if (hard)
			*start = GET_STACK_TOP_HARD(stack_abt, pos);
		else
			*start = GET_STACK_TOP_SOFT(stack_abt, pos);
		*end = GET_STACK_BOTTOM(stack_abt, pos);
		ret = true;
	} else if (!l->flags) {
		if (ct < 0 || ct >= CFG_NUM_THREADS)
			goto out;

		*end = threads[ct].stack_va_end;
		*start = *end - STACK_THREAD_SIZE;
		if (!hard)
			*start += STACK_CHECK_EXTRA;
		ret = true;
	}
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}

bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

/*
 * This function should always be accurate, but it might be possible to
 * implement a more efficient variant depending on the CPU architecture.
 */
bool __weak thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/*
	 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're
	 * handling some exception.
	 */
	ret = (l->curr_thread != THREAD_ID_INVALID) &&
	      !(l->flags & ~THREAD_CLF_TMP);
	thread_unmask_exceptions(exceptions);

	return ret;
}

short int thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	short int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

short int thread_get_id(void)
{
	short int ct = thread_get_id_may_fail();

	/* Thread ID has to fit in a short int */
	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}
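
/*
 * With CFG_WITH_PAGER the thread stacks are pager-backed instead of the
 * static stack_thread arrays. Each loop iteration below reserves
 * SMALL_PAGE_SIZE + STACK_THREAD_SIZE of vmem but only registers the
 * last STACK_THREAD_SIZE bytes as a pageable region, leaving the first
 * page unmapped as a protection gap (sketch):
 *
 * [ guard page | STACK_THREAD_SIZE, locked-paged, SP starts at the end ]
 */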

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the region to the pager */
		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					  PAGED_REGION_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK_BOTTOM(stack_thread, n)))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/

void thread_init_threads(void)
{
	size_t n = 0;

	init_thread_stacks();
	print_stack_limits();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}
}

void __nostackcheck thread_init_thread_core_local(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;
	}
	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
}
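
/*
 * Note that thread_init_thread_core_local() above only sets
 * tmp_stack_va_end for core 0, presumably because only the boot core
 * runs this early. All cores, including core 0, get their temporary and
 * abort stack ends assigned in thread_init_core_local_stacks() below.
 */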

void thread_init_core_local_stacks(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
					  STACK_TMP_OFFS;
		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
	}
}

#if defined(CFG_CORE_PAUTH)
void thread_init_thread_pauth_keys(void)
{
	size_t n = 0;

	for (n = 0; n < CFG_NUM_THREADS; n++)
		if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys)))
			panic("Failed to init thread pauth keys");
}

void thread_init_core_local_pauth_keys(void)
{
	struct thread_core_local *tcl = thread_core_local;
	size_t n = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++)
		if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys)))
			panic("Failed to init core local pauth keys");
}
#endif

struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);
	return &threads[l->curr_thread].regs;
}
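
/*
 * The two functions below manage the per-thread foreign interrupt
 * preference: thread_set_foreign_intr() records it in the thread flags
 * and applies it immediately, thread_restore_foreign_intr() re-applies
 * it, e.g. after an exception path has masked everything. Sketch of a
 * caller temporarily allowing foreign interrupts:
 *
 *	thread_set_foreign_intr(true);
 *	...
 *	thread_set_foreign_intr(false);
 */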

void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (enable) {
		threads[l->curr_thread].flags |=
			THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
			~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
{
	switch (shm_type) {
	case THREAD_SHM_TYPE_APPLICATION:
		return thread_rpc_alloc_payload(size);
	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
		return thread_rpc_alloc_kernel_payload(size);
	case THREAD_SHM_TYPE_GLOBAL:
		return thread_rpc_alloc_global_payload(size);
	default:
		return NULL;
	}
}

static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
{
	if (ce->mobj) {
		switch (ce->type) {
		case THREAD_SHM_TYPE_APPLICATION:
			thread_rpc_free_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
			thread_rpc_free_kernel_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_GLOBAL:
			thread_rpc_free_global_payload(ce->mobj);
			break;
		default:
			assert(0); /* "can't happen" */
			break;
		}
	}
	ce->mobj = NULL;
	ce->size = 0;
}
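
/*
 * The cache below keeps at most one shared-memory allocation alive per
 * (thread, user) pair: get_shm_cache_entry() finds or creates the
 * per-user entry and thread_rpc_shm_cache_alloc() only allocates a new
 * mobj when the cached one has the wrong type or is too small, which
 * avoids a round trip to normal world on every RPC.
 */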

static struct thread_shm_cache_entry *
get_shm_cache_entry(enum thread_shm_cache_user user)
{
	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
	struct thread_shm_cache_entry *ce = NULL;

	SLIST_FOREACH(ce, cache, link)
		if (ce->user == user)
			return ce;

	ce = calloc(1, sizeof(*ce));
	if (ce) {
		ce->user = user;
		SLIST_INSERT_HEAD(cache, ce, link);
	}

	return ce;
}

void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj)
{
	struct thread_shm_cache_entry *ce = NULL;
	size_t sz = size;
	paddr_t p = 0;
	void *va = NULL;

	if (!size)
		return NULL;

	ce = get_shm_cache_entry(user);
	if (!ce)
		return NULL;

	/*
	 * Always allocate in page chunks as normal world allocates payload
	 * memory as complete pages.
	 */
	sz = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (ce->type != shm_type || sz > ce->size) {
		clear_shm_cache_entry(ce);

		ce->mobj = alloc_shm(shm_type, sz);
		if (!ce->mobj)
			return NULL;

		if (mobj_get_pa(ce->mobj, 0, 0, &p))
			goto err;

		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
			goto err;

		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;

		ce->size = sz;
		ce->type = shm_type;
	} else {
		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;
	}
	*mobj = ce->mobj;

	return va;
err:
	clear_shm_cache_entry(ce);
	return NULL;
}

void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
{
	while (true) {
		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);

		if (!ce)
			break;
		SLIST_REMOVE_HEAD(cache, link);
		clear_shm_cache_entry(ce);
		free(ce);
	}
}
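
/*
 * Example use of the cache API above (a sketch; THREAD_SHM_CACHE_USER_FS
 * is assumed here to be one of the enum thread_shm_cache_user values):
 *
 *	struct mobj *mobj = NULL;
 *	void *va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					      THREAD_SHM_TYPE_APPLICATION,
 *					      size, &mobj);
 *
 *	if (!va)
 *		return TEE_ERROR_OUT_OF_MEMORY;
 *
 * va and *mobj stay valid, and are reused, until the entry is replaced
 * or thread_rpc_shm_cache_clear() runs on the cache.
 */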