// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2016-2021, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2020-2021, Arm Limited
 */

#include <config.h>
#include <kernel/asan.h>
#include <kernel/lockdep.h>
#include <kernel/misc.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/thread_defs.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <mm/mobj.h>

struct thread_ctx threads[CFG_NUM_THREADS];

struct thread_core_local thread_core_local[CFG_TEE_CORE_NB_CORE] __nex_bss;

/*
 * Stacks
 *
 * [Lower addresses on the left]
 *
 * [ STACK_CANARY_SIZE/2 | STACK_CHECK_EXTRA | STACK_XXX_SIZE | STACK_CANARY_SIZE/2 ]
 * ^                     ^                   ^                ^
 * stack_xxx[n]          "hard" top          "soft" top       bottom
 */

#ifdef CFG_WITH_STACK_CANARIES
#define STACK_CANARY_SIZE	(4 * sizeof(long))
#define START_CANARY_VALUE	0xdededede
#define END_CANARY_VALUE	0xabababab
#define GET_START_CANARY(name, stack_num) name[stack_num][0]
#define GET_END_CANARY(name, stack_num) \
	name[stack_num][sizeof(name[stack_num]) / sizeof(uint32_t) - 1]
#else
#define STACK_CANARY_SIZE	0
#endif

#define DECLARE_STACK(name, num_stacks, stack_size, linkage) \
linkage uint32_t name[num_stacks] \
		[ROUNDUP(stack_size + STACK_CANARY_SIZE + STACK_CHECK_EXTRA, \
			 STACK_ALIGNMENT) / sizeof(uint32_t)] \
		__attribute__((section(".nozi_stack." # name), \
			       aligned(STACK_ALIGNMENT)))

#define GET_STACK(stack) ((vaddr_t)(stack) + STACK_SIZE(stack))

DECLARE_STACK(stack_tmp, CFG_TEE_CORE_NB_CORE,
	      STACK_TMP_SIZE + CFG_STACK_TMP_EXTRA, static);
DECLARE_STACK(stack_abt, CFG_TEE_CORE_NB_CORE, STACK_ABT_SIZE, static);
#ifndef CFG_WITH_PAGER
DECLARE_STACK(stack_thread, CFG_NUM_THREADS,
	      STACK_THREAD_SIZE + CFG_STACK_THREAD_EXTRA, static);
#endif

#define GET_STACK_TOP_HARD(stack, n) \
	((vaddr_t)&(stack)[n] + STACK_CANARY_SIZE / 2)
#define GET_STACK_TOP_SOFT(stack, n) \
	(GET_STACK_TOP_HARD(stack, n) + STACK_CHECK_EXTRA)
#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \
				    STACK_CANARY_SIZE / 2)
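/*
 * Note: for a declared stack array, the helpers above resolve as
 * follows: GET_STACK_TOP_HARD() is the first usable address, just past
 * the start canary; GET_STACK_TOP_SOFT() adds the STACK_CHECK_EXTRA
 * slack used by the soft stack check; GET_STACK_BOTTOM() is one past
 * the last usable address, just below the end canary. Stacks grow
 * towards lower addresses, so GET_STACK_BOTTOM() is what is used as the
 * initial stack pointer.
 */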
const void *stack_tmp_export __section(".identity_map.stack_tmp_export") =
	(void *)(GET_STACK_BOTTOM(stack_tmp, 0) - STACK_TMP_OFFS);
const uint32_t stack_tmp_stride __section(".identity_map.stack_tmp_stride") =
	sizeof(stack_tmp[0]);

/*
 * This stack setup info is required by secondary boot cores before they
 * each locally enable the pager (the MMU). Hence kept in pager sections.
 */
DECLARE_KEEP_PAGER(stack_tmp_export);
DECLARE_KEEP_PAGER(stack_tmp_stride);

static unsigned int thread_global_lock __nex_bss = SPINLOCK_UNLOCK;
void thread_init_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	size_t n;
#define INIT_CANARY(name) \
	for (n = 0; n < ARRAY_SIZE(name); n++) { \
		uint32_t *start_canary = &GET_START_CANARY(name, n); \
		uint32_t *end_canary = &GET_END_CANARY(name, n); \
		\
		*start_canary = START_CANARY_VALUE; \
		*end_canary = END_CANARY_VALUE; \
	}

	INIT_CANARY(stack_tmp);
	INIT_CANARY(stack_abt);
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	INIT_CANARY(stack_thread);
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}

#define CANARY_DIED(stack, loc, n, addr) \
	do { \
		EMSG_RAW("Dead canary at %s of '%s[%zu]' (%p)", #loc, #stack, \
			 n, (void *)addr); \
		panic(); \
	} while (0)

void thread_check_canaries(void)
{
#ifdef CFG_WITH_STACK_CANARIES
	uint32_t *canary = NULL;
	size_t n = 0;

	for (n = 0; n < ARRAY_SIZE(stack_tmp); n++) {
		canary = &GET_START_CANARY(stack_tmp, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_tmp, start, n, canary);
		canary = &GET_END_CANARY(stack_tmp, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_tmp, end, n, canary);
	}

	for (n = 0; n < ARRAY_SIZE(stack_abt); n++) {
		canary = &GET_START_CANARY(stack_abt, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_abt, start, n, canary);
		canary = &GET_END_CANARY(stack_abt, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_abt, end, n, canary);
	}
#if !defined(CFG_WITH_PAGER) && !defined(CFG_VIRTUALIZATION)
	for (n = 0; n < ARRAY_SIZE(stack_thread); n++) {
		canary = &GET_START_CANARY(stack_thread, n);
		if (*canary != START_CANARY_VALUE)
			CANARY_DIED(stack_thread, start, n, canary);
		canary = &GET_END_CANARY(stack_thread, n);
		if (*canary != END_CANARY_VALUE)
			CANARY_DIED(stack_thread, end, n, canary);
	}
#endif
#endif/*CFG_WITH_STACK_CANARIES*/
}
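/*
 * Note: a dead canary only proves that an overflow has already
 * happened, so thread_check_canaries() is a best-effort detector; it
 * presumably runs at convenient checkpoints rather than on every stack
 * switch.
 */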
void thread_lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}

void thread_unlock_global(void)
{
	cpu_spin_unlock(&thread_global_lock);
}

static struct thread_core_local * __nostackcheck
get_core_local(unsigned int pos)
{
	/*
	 * Foreign interrupts must be disabled before playing with core_local
	 * since we otherwise may be rescheduled to a different core in the
	 * middle of this function.
	 */
	assert(thread_get_exceptions() & THREAD_EXCP_FOREIGN_INTR);

	assert(pos < CFG_TEE_CORE_NB_CORE);
	return &thread_core_local[pos];
}

struct thread_core_local * __nostackcheck thread_get_core_local(void)
{
	unsigned int pos = get_core_pos();

	return get_core_local(pos);
}

#ifdef CFG_CORE_DEBUG_CHECK_STACKS
static void print_stack_limits(void)
{
	size_t n = 0;
	vaddr_t __maybe_unused start = 0;
	vaddr_t __maybe_unused end = 0;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		start = GET_STACK_TOP_SOFT(stack_tmp, n);
		end = GET_STACK_BOTTOM(stack_tmp, n);
		DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		start = GET_STACK_TOP_SOFT(stack_abt, n);
		end = GET_STACK_BOTTOM(stack_abt, n);
		DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		end = threads[n].stack_va_end;
		start = end - STACK_THREAD_SIZE;
		DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end);
	}
}

static void check_stack_limits(void)
{
	vaddr_t stack_start = 0;
	vaddr_t stack_end = 0;
	/* Any value in the current stack frame will do */
	vaddr_t current_sp = (vaddr_t)&stack_start;

	if (!get_stack_soft_limits(&stack_start, &stack_end))
		panic("Unknown stack limits");
	if (current_sp < stack_start || current_sp > stack_end) {
		DMSG("Stack pointer out of range (0x%" PRIxVA ")", current_sp);
		print_stack_limits();
		panic();
	}
}
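/*
 * The __cyg_profile_func_enter()/__cyg_profile_func_exit() hooks below
 * are called by compiler-generated instrumentation (GCC's
 * -finstrument-functions) on every function entry and exit, except in
 * functions marked __nostackcheck, which are assumed to be excluded
 * from instrumentation. check_stack_limits() itself calls instrumented
 * functions, so a per-context recursion flag keeps the enter hook from
 * re-entering itself.
 */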
static bool * __nostackcheck get_stackcheck_recursion_flag(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool *p = NULL;

	if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP))
		p = &l->stackcheck_recursion;
	else if (!l->flags)
		p = &threads[ct].tsd.stackcheck_recursion;

	thread_unmask_exceptions(exceptions);
	return p;
}

void __cyg_profile_func_enter(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_enter(void *this_fn __unused,
					     void *call_site __unused)
{
	bool *p = get_stackcheck_recursion_flag();

	assert(p);
	if (*p)
		return;
	*p = true;
	check_stack_limits();
	*p = false;
}

void __cyg_profile_func_exit(void *this_fn, void *call_site);
void __nostackcheck __cyg_profile_func_exit(void *this_fn __unused,
					    void *call_site __unused)
{
}
#else
static void print_stack_limits(void)
{
}
#endif

void thread_init_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	thread_init_threads();

	l->curr_thread = 0;
	threads[0].state = THREAD_STATE_ACTIVE;
}

void __nostackcheck thread_clr_boot_thread(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS);
	assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);
	threads[l->curr_thread].state = THREAD_STATE_FREE;
	l->curr_thread = THREAD_ID_INVALID;
}
void __nostackcheck *thread_get_tmp_sp(void)
{
	struct thread_core_local *l = thread_get_core_local();

	/*
	 * Called from assembly when switching to the temporary stack, so
	 * flags need updating
	 */
	l->flags |= THREAD_CLF_TMP;

	return (void *)l->tmp_stack_va_end;
}

vaddr_t thread_stack_start(void)
{
	struct thread_ctx *thr;
	int ct = thread_get_id_may_fail();

	if (ct == THREAD_ID_INVALID)
		return 0;

	thr = threads + ct;
	return thr->stack_va_end - STACK_THREAD_SIZE;
}

size_t thread_stack_size(void)
{
	return STACK_THREAD_SIZE;
}

bool get_stack_limits(vaddr_t *start, vaddr_t *end, bool hard)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	unsigned int pos = get_core_pos();
	struct thread_core_local *l = get_core_local(pos);
	int ct = l->curr_thread;
	bool ret = false;

	if (l->flags & THREAD_CLF_TMP) {
		if (hard)
			*start = GET_STACK_TOP_HARD(stack_tmp, pos);
		else
			*start = GET_STACK_TOP_SOFT(stack_tmp, pos);
		*end = GET_STACK_BOTTOM(stack_tmp, pos);
		ret = true;
	} else if (l->flags & THREAD_CLF_ABORT) {
		if (hard)
			*start = GET_STACK_TOP_HARD(stack_abt, pos);
		else
			*start = GET_STACK_TOP_SOFT(stack_abt, pos);
		*end = GET_STACK_BOTTOM(stack_abt, pos);
		ret = true;
	} else if (!l->flags) {
		if (ct < 0 || ct >= CFG_NUM_THREADS)
			goto out;

		*end = threads[ct].stack_va_end;
		*start = *end - STACK_THREAD_SIZE;
		if (!hard)
			*start += STACK_CHECK_EXTRA;
		ret = true;
	}
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}
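/*
 * Illustrative use of get_stack_limits(), pass hard = false to get the
 * "soft" limits used by the stack checker:
 *
 *	vaddr_t start = 0;
 *	vaddr_t end = 0;
 *	vaddr_t sp = (vaddr_t)&start;	// any address in the current frame
 *
 *	if (get_stack_limits(&start, &end, false))
 *		assert(sp >= start && sp <= end);
 *
 * get_stack_soft_limits() used by check_stack_limits() above is
 * presumably a thin wrapper passing hard = false.
 */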
bool thread_is_from_abort_mode(void)
{
	struct thread_core_local *l = thread_get_core_local();

	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

/*
 * This function should always be accurate, but it might be possible to
 * implement a more efficient version depending on CPU architecture.
 */
bool __weak thread_is_in_normal_mode(void)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	bool ret;

	/*
	 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're
	 * handling some exception.
	 */
	ret = (l->curr_thread != THREAD_ID_INVALID) &&
	      !(l->flags & ~THREAD_CLF_TMP);
	thread_unmask_exceptions(exceptions);

	return ret;
}

short int thread_get_id_may_fail(void)
{
	/*
	 * thread_get_core_local() requires foreign interrupts to be disabled
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l = thread_get_core_local();
	short int ct = l->curr_thread;

	thread_unmask_exceptions(exceptions);
	return ct;
}

short int thread_get_id(void)
{
	short int ct = thread_get_id_may_fail();

	/* Thread ID has to fit in a short int */
	COMPILE_TIME_ASSERT(CFG_NUM_THREADS <= SHRT_MAX);
	assert(ct >= 0 && ct < CFG_NUM_THREADS);
	return ct;
}

#ifdef CFG_WITH_PAGER
static void init_thread_stacks(void)
{
	size_t n = 0;

	/*
	 * Allocate virtual memory for thread stacks.
	 */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		tee_mm_entry_t *mm = NULL;
		vaddr_t sp = 0;
		size_t num_pages = 0;
		struct fobj *fobj = NULL;

		/* Find vmem for thread stack and its protection gap */
		mm = tee_mm_alloc(&tee_mm_vcore,
				  SMALL_PAGE_SIZE + STACK_THREAD_SIZE);
		assert(mm);

		/* Claim eventual physical page */
		tee_pager_add_pages(tee_mm_get_smem(mm), tee_mm_get_size(mm),
				    true);

		num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1;
		fobj = fobj_locked_paged_alloc(num_pages);

		/* Add the region to the pager */
		tee_pager_add_core_region(tee_mm_get_smem(mm) + SMALL_PAGE_SIZE,
					  PAGED_REGION_TYPE_LOCK, fobj);
		fobj_put(fobj);

		/* init effective stack */
		sp = tee_mm_get_smem(mm) + tee_mm_get_bytes(mm);
		asan_tag_access((void *)tee_mm_get_smem(mm), (void *)sp);
		if (!thread_init_stack(n, sp))
			panic("init stack failed");
	}
}
#else
static void init_thread_stacks(void)
{
	size_t n;

	/* Assign the thread stacks */
	for (n = 0; n < CFG_NUM_THREADS; n++) {
		if (!thread_init_stack(n, GET_STACK_BOTTOM(stack_thread, n)))
			panic("thread_init_stack failed");
	}
}
#endif /*CFG_WITH_PAGER*/
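/*
 * Note: in the pager variant above, the SMALL_PAGE_SIZE "protection
 * gap" at the start of each allocation is never handed to the pager
 * (the region added starts at tee_mm_get_smem(mm) + SMALL_PAGE_SIZE),
 * so it stays unmapped: an overflow past the top of a thread stack
 * should fault instead of silently corrupting a neighbouring
 * allocation.
 */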
void thread_init_threads(void)
{
	size_t n = 0;

	init_thread_stacks();
	print_stack_limits();
	pgt_init();

	mutex_lockdep_init();

	for (n = 0; n < CFG_NUM_THREADS; n++) {
		TAILQ_INIT(&threads[n].tsd.sess_stack);
		SLIST_INIT(&threads[n].tsd.pgt_cache);
	}
}

void __nostackcheck thread_init_thread_core_local(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;
	}
	tcl[0].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, 0);
}

void thread_init_core_local_stacks(void)
{
	size_t n = 0;
	struct thread_core_local *tcl = thread_core_local;

	for (n = 0; n < CFG_TEE_CORE_NB_CORE; n++) {
		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
					  STACK_TMP_OFFS;
		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
	}
}
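/*
 * Note: thread_init_thread_core_local() above assigns a temporary stack
 * only to the boot core (index 0), presumably because it runs before
 * thread_init_core_local_stacks() has set up the tmp and abort stacks
 * for the remaining cores.
 */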
struct thread_specific_data *thread_get_tsd(void)
{
	return &threads[thread_get_id()].tsd;
}

struct thread_ctx_regs * __nostackcheck thread_get_ctx_regs(void)
{
	struct thread_core_local *l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);
	return &threads[l->curr_thread].regs;
}
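/*
 * Background: in OP-TEE, "foreign" interrupts are interrupts handled by
 * the normal world rather than by the TEE. The two functions below
 * therefore control whether the current thread may be preempted by the
 * normal world.
 */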
void thread_set_foreign_intr(bool enable)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (enable) {
		threads[l->curr_thread].flags |=
			THREAD_FLAGS_FOREIGN_INTR_ENABLE;
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
	} else {
		/*
		 * No need to disable foreign interrupts here since they're
		 * already disabled above.
		 */
		threads[l->curr_thread].flags &=
			~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	}
}

void thread_restore_foreign_intr(void)
{
	/* thread_get_core_local() requires foreign interrupts to be disabled */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_FOREIGN_INTR);
	struct thread_core_local *l;

	l = thread_get_core_local();

	assert(l->curr_thread != THREAD_ID_INVALID);

	if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		thread_set_exceptions(exceptions & ~THREAD_EXCP_FOREIGN_INTR);
}

static struct mobj *alloc_shm(enum thread_shm_type shm_type, size_t size)
{
	switch (shm_type) {
	case THREAD_SHM_TYPE_APPLICATION:
		return thread_rpc_alloc_payload(size);
	case THREAD_SHM_TYPE_KERNEL_PRIVATE:
		return thread_rpc_alloc_kernel_payload(size);
	case THREAD_SHM_TYPE_GLOBAL:
		return thread_rpc_alloc_global_payload(size);
	default:
		return NULL;
	}
}

static void clear_shm_cache_entry(struct thread_shm_cache_entry *ce)
{
	if (ce->mobj) {
		switch (ce->type) {
		case THREAD_SHM_TYPE_APPLICATION:
			thread_rpc_free_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_KERNEL_PRIVATE:
			thread_rpc_free_kernel_payload(ce->mobj);
			break;
		case THREAD_SHM_TYPE_GLOBAL:
			thread_rpc_free_global_payload(ce->mobj);
			break;
		default:
			assert(0); /* "can't happen" */
			break;
		}
	}
	ce->mobj = NULL;
	ce->size = 0;
}

static struct thread_shm_cache_entry *
get_shm_cache_entry(enum thread_shm_cache_user user)
{
	struct thread_shm_cache *cache = &threads[thread_get_id()].shm_cache;
	struct thread_shm_cache_entry *ce = NULL;

	SLIST_FOREACH(ce, cache, link)
		if (ce->user == user)
			return ce;

	ce = calloc(1, sizeof(*ce));
	if (ce) {
		ce->user = user;
		SLIST_INSERT_HEAD(cache, ce, link);
	}

	return ce;
}
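/*
 * Illustrative use of the per-thread SHM cache (hypothetical caller,
 * not from the original source):
 *
 *	struct mobj *mobj = NULL;
 *	void *va = NULL;
 *
 *	va = thread_rpc_shm_cache_alloc(THREAD_SHM_CACHE_USER_FS,
 *					THREAD_SHM_TYPE_APPLICATION,
 *					len, &mobj);
 *
 * Repeated calls with the same user return the cached buffer as long as
 * the type matches and the page-rounded size still fits. The cache is
 * per-thread, so no extra locking is needed.
 */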
void *thread_rpc_shm_cache_alloc(enum thread_shm_cache_user user,
				 enum thread_shm_type shm_type,
				 size_t size, struct mobj **mobj)
{
	struct thread_shm_cache_entry *ce = NULL;
	size_t sz = size;
	paddr_t p = 0;
	void *va = NULL;

	if (!size)
		return NULL;

	ce = get_shm_cache_entry(user);
	if (!ce)
		return NULL;

	/*
	 * Always allocate in page chunks as normal world allocates payload
	 * memory as complete pages.
	 */
	sz = ROUNDUP(size, SMALL_PAGE_SIZE);

	if (ce->type != shm_type || sz > ce->size) {
		clear_shm_cache_entry(ce);

		ce->mobj = alloc_shm(shm_type, sz);
		if (!ce->mobj)
			return NULL;

		if (mobj_get_pa(ce->mobj, 0, 0, &p))
			goto err;

		if (!IS_ALIGNED_WITH_TYPE(p, uint64_t))
			goto err;

		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;

		ce->size = sz;
		ce->type = shm_type;
	} else {
		va = mobj_get_va(ce->mobj, 0, sz);
		if (!va)
			goto err;
	}
	*mobj = ce->mobj;

	return va;
err:
	clear_shm_cache_entry(ce);
	return NULL;
}

void thread_rpc_shm_cache_clear(struct thread_shm_cache *cache)
{
	while (true) {
		struct thread_shm_cache_entry *ce = SLIST_FIRST(cache);

		if (!ce)
			break;
		SLIST_REMOVE_HEAD(cache, link);
		clear_shm_cache_entry(ce);
		free(ce);
	}
}