Lines Matching +full:- +full:n

1 // SPDX-License-Identifier: BSD-2-Clause
3 * Copyright (c) 2016-2022, Linaro Limited
4 * Copyright (c) 2014, STMicroelectronics International N.V.
5 * Copyright (c) 2020-2021, Arm Limited
47 * stack_xxx[n] "hard" top "soft" top bottom
64 #define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + sizeof(stack[n]) - \ argument
68 #define GET_STACK_BOTTOM(stack, n) 0 argument
73 #define GET_STACK_THREAD_BOTTOM(n) 0 argument
76 #define GET_STACK_THREAD_BOTTOM(n) \ argument
77 ((vaddr_t)&stack_thread[n] + sizeof(stack_thread[n]) - \
104 return end_va - l + STACK_CANARY_SIZE; in stack_end_va_to_top_hard()
120 return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) - in stack_end_va_to_start_canary()
127 return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t)); in stack_end_va_to_end_canary()
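The macro bodies above are cut at their line-continuation backslashes by the search, so the complete expressions are not visible here. As a rough guide to the address math they appear to implement, the following self-contained sketch models a stack array with canary space reserved at both ends; the canary size, stack size, type names and the completed macro body are assumptions made for illustration, not values taken from the matched file.

/*
 * Illustrative sketch only: a standalone model of the address math suggested
 * by GET_STACK_BOTTOM()/GET_STACK_THREAD_BOTTOM() and the
 * stack_end_va_to_*() helpers matched above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))	/* assumed size */
#define DEMO_STACK_SIZE		4096			/* assumed size */
#define DEMO_NUM_STACKS		2

/* Each stack reserves STACK_CANARY_SIZE/2 bytes at both ends for canaries. */
static uint8_t stack_demo[DEMO_NUM_STACKS][DEMO_STACK_SIZE]
	__attribute__((aligned(16)));

/* "Bottom": the end VA just below the high canary area (assumed body). */
#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + \
				    sizeof((stack)[n]) - STACK_CANARY_SIZE / 2)

/* "Hard" top: lowest usable address, just above the low canary area. */
static vaddr_t stack_end_va_to_top_hard(size_t stack_size, vaddr_t end_va)
{
	return end_va - stack_size + STACK_CANARY_SIZE;
}

/* The canary words sit immediately outside the usable range. */
static uint32_t *stack_end_va_to_start_canary(size_t stack_size, vaddr_t end_va)
{
	return (uint32_t *)(stack_end_va_to_top_hard(stack_size, end_va) -
			    STACK_CANARY_SIZE / 2);
}

static uint32_t *stack_end_va_to_end_canary(vaddr_t end_va)
{
	return (uint32_t *)(end_va + STACK_CANARY_SIZE / 2 - sizeof(uint32_t));
}

int main(void)
{
	size_t n = 0;

	for (n = 0; n < DEMO_NUM_STACKS; n++) {
		vaddr_t bottom = GET_STACK_BOTTOM(stack_demo, n);

		printf("stack %zu: low canary %p, high canary %p, "
		       "usable 0x%" PRIxPTR "..0x%" PRIxPTR "\n", n,
		       (void *)stack_end_va_to_start_canary(DEMO_STACK_SIZE,
							     bottom),
		       (void *)stack_end_va_to_end_canary(bottom),
		       stack_end_va_to_top_hard(DEMO_STACK_SIZE, bottom),
		       bottom);
	}
	return 0;
}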
144 size_t n = 0; in thread_init_canaries() local
147 for (n = 0; n < thread_core_count; n++) { in thread_init_canaries()
148 if (thread_core_local[n].tmp_stack_va_end) { in thread_init_canaries()
149 va = thread_core_local[n].tmp_stack_va_end + in thread_init_canaries()
153 va = thread_core_local[n].abt_stack_va_end; in thread_init_canaries()
163 for (n = 0; n < thread_count; n++) { in thread_init_canaries()
164 va = threads[n].stack_va_end; in thread_init_canaries()
193 size_t n __maybe_unused, in check_stack_canary()
201 stack_name, n, (void *)canary); in check_stack_canary()
208 stack_name, n, (void *)canary); in check_stack_canary()
216 size_t n = 0; in thread_check_canaries() local
219 for (n = 0; n < thread_core_count; n++) { in thread_check_canaries()
220 if (thread_core_local[n].tmp_stack_va_end) { in thread_check_canaries()
221 va = thread_core_local[n].tmp_stack_va_end + in thread_check_canaries()
223 check_stack_canary("tmp_stack", n, in thread_check_canaries()
227 va = thread_core_local[n].abt_stack_va_end; in thread_check_canaries()
229 check_stack_canary("abt_stack", n, in thread_check_canaries()
236 for (n = 0; n < thread_count; n++) { in thread_check_canaries()
237 va = threads[n].stack_va_end; in thread_check_canaries()
239 check_stack_canary("thread_stack", n, in thread_check_canaries()
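thread_init_canaries() and thread_check_canaries() above walk the per-core tmp/abt stacks and every entry in threads[], writing and later verifying marker words at both ends of each stack. A minimal, self-contained sketch of that write-then-verify pattern on a single demo stack follows; the magic values and the abort-on-corruption policy are assumptions made for illustration.

/*
 * Sketch of the canary init/check pattern implied by the matches above.
 * The first and last words of the demo buffer act as canaries; a mismatch
 * is treated as fatal, as the "Dead canary" style report suggests.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_START_CANARY	0xdeadbeefU	/* assumed magic value */
#define DEMO_END_CANARY		0xcafef00dU	/* assumed magic value */

static uint32_t stack_demo[1024];	/* usable area between the two words */

static void init_canaries(void)
{
	stack_demo[0] = DEMO_START_CANARY;
	stack_demo[1023] = DEMO_END_CANARY;
}

static void check_canary(const char *stack_name, size_t n, uint32_t val,
			 uint32_t *canary)
{
	if (*canary != val) {
		/* The matched code logs the stack name, index and address. */
		fprintf(stderr, "Dead canary at %s[%zu] (%p)\n",
			stack_name, n, (void *)canary);
		abort();
	}
}

int main(void)
{
	init_canaries();
	check_canary("demo_stack", 0, DEMO_START_CANARY, &stack_demo[0]);
	check_canary("demo_stack", 0, DEMO_END_CANARY, &stack_demo[1023]);
	puts("canaries intact");
	return 0;
}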
287 size_t n = 0; in print_stack_limits() local
292 for (n = 0; n < thread_core_count; n++) { in print_stack_limits()
293 va = thread_core_local[n].tmp_stack_va_end + STACK_TMP_OFFS; in print_stack_limits()
296 DMSG("tmp [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); in print_stack_limits()
298 va = thread_core_local[n].abt_stack_va_end; in print_stack_limits()
301 DMSG("abt [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); in print_stack_limits()
304 for (n = 0; n < thread_count; n++) { in print_stack_limits()
305 va = threads[n].stack_va_end; in print_stack_limits()
308 DMSG("thr [%zu] 0x%" PRIxVA "..0x%" PRIxVA, n, start, end); in print_stack_limits()
335 int ct = l->curr_thread; in get_stackcheck_recursion_flag()
338 if (l->flags & (THREAD_CLF_ABORT | THREAD_CLF_TMP)) in get_stackcheck_recursion_flag()
339 p = &l->stackcheck_recursion; in get_stackcheck_recursion_flag()
340 else if (!l->flags) in get_stackcheck_recursion_flag()
376 l->curr_thread = 0; in thread_init_boot_thread()
384 assert(l->curr_thread >= 0 && l->curr_thread < CFG_NUM_THREADS); in thread_clr_boot_thread()
385 assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE); in thread_clr_boot_thread()
386 threads[l->curr_thread].state = THREAD_STATE_FREE; in thread_clr_boot_thread()
387 l->curr_thread = THREAD_ID_INVALID; in thread_clr_boot_thread()
399 l->flags |= THREAD_CLF_TMP; in thread_get_tmp_sp()
401 return (void *)l->tmp_stack_va_end; in thread_get_tmp_sp()
413 return stack_end_va_to_top_soft(STACK_THREAD_SIZE, thr->stack_va_end); in thread_stack_start()
426 int ct = l->curr_thread; in get_stack_limits()
431 if (l->flags & THREAD_CLF_TMP) { in get_stack_limits()
432 va = l->tmp_stack_va_end + STACK_TMP_OFFS; in get_stack_limits()
434 } else if (l->flags & THREAD_CLF_ABORT) { in get_stack_limits()
435 va = l->abt_stack_va_end; in get_stack_limits()
437 } else if (!l->flags && ct >= 0 && (size_t)ct < thread_count) { in get_stack_limits()
459 return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT; in thread_is_from_abort_mode()
473 * If any bit in l->flags is set aside from THREAD_CLF_TMP we're in thread_is_in_normal_mode()
476 ret = (l->curr_thread != THREAD_ID_INVALID) && in thread_is_in_normal_mode()
477 !(l->flags & ~THREAD_CLF_TMP); in thread_is_in_normal_mode()
490 short int ct = l->curr_thread; in thread_get_id_may_fail()
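The state queries above test a per-core flags word and the current thread id. A small sketch of that bookkeeping follows; the bit assignments, shift value and struct layout are assumptions, the point being the tests themselves: "normal mode" means a thread is assigned and no flag other than THREAD_CLF_TMP is set.

/*
 * Sketch of the per-core state tested by thread_is_in_normal_mode() and
 * thread_is_from_abort_mode() in the matches above.  All constants here are
 * assumed values chosen for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define THREAD_ID_INVALID	(-1)
#define THREAD_CLF_TMP		(1U << 0)	/* assumed bit assignment */
#define THREAD_CLF_ABORT	(1U << 1)	/* assumed bit assignment */
#define THREAD_CLF_SAVED_SHIFT	4		/* assumed shift */

struct core_local {
	unsigned int flags;
	short int curr_thread;
};

static bool thread_is_in_normal_mode(const struct core_local *l)
{
	/* A thread is assigned and nothing but the TMP flag is set. */
	return l->curr_thread != THREAD_ID_INVALID &&
	       !(l->flags & ~THREAD_CLF_TMP);
}

static bool thread_is_from_abort_mode(const struct core_local *l)
{
	/* The previously saved flags tell whether we entered via an abort. */
	return (l->flags >> THREAD_CLF_SAVED_SHIFT) & THREAD_CLF_ABORT;
}

int main(void)
{
	struct core_local l = { .flags = THREAD_CLF_TMP, .curr_thread = 3 };

	printf("normal mode: %d, from abort: %d\n",
	       thread_is_in_normal_mode(&l), thread_is_from_abort_mode(&l));
	return 0;
}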
520 end_va = va + l - STACK_CANARY_SIZE / 2; in alloc_stack()
530 size_t n = 0; in init_thread_stacks() local
535 for (n = 0; n < thread_count; n++) { in init_thread_stacks()
550 num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE - 1; in init_thread_stacks()
561 threads[n].stack_va_end = sp; in init_thread_stacks()
568 size_t n = 0; in init_thread_stacks() local
571 for (n = 0; n < thread_count; n++) { in init_thread_stacks()
575 va = GET_STACK_THREAD_BOTTOM(n); in init_thread_stacks()
576 threads[n].stack_va_end = va; in init_thread_stacks()
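The second init_thread_stacks() variant above simply points each threads[n].stack_va_end at the bottom of a statically allocated stack. A self-contained sketch of that path follows; the thread count, stack size and canary margin are assumed values.

/*
 * Sketch of the non-paged thread stack setup suggested by the matches above:
 * one static stack per thread, with stack_va_end recorded for later use as
 * the initial stack pointer area.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

#define NUM_THREADS		4			/* assumed count */
#define STACK_THREAD_SIZE	8192			/* assumed size */
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))	/* assumed size */

struct thread_ctx {
	vaddr_t stack_va_end;
};

static struct thread_ctx threads[NUM_THREADS];
static uint8_t stack_thread[NUM_THREADS][STACK_THREAD_SIZE]
	__attribute__((aligned(16)));

#define GET_STACK_THREAD_BOTTOM(n) \
	((vaddr_t)&stack_thread[n] + sizeof(stack_thread[n]) - \
	 STACK_CANARY_SIZE / 2)

static void init_thread_stacks(void)
{
	size_t n = 0;

	for (n = 0; n < NUM_THREADS; n++)
		threads[n].stack_va_end = GET_STACK_THREAD_BOTTOM(n);
}

int main(void)
{
	init_thread_stacks();
	printf("thread 0 stack_va_end: 0x%" PRIxPTR "\n",
	       threads[0].stack_va_end);
	return 0;
}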
585 size_t n = 0; in thread_init_threads() local
603 for (n = 0; n < thread_count; n++) in thread_init_threads()
604 TAILQ_INIT(&threads[n].tsd.sess_stack); in thread_init_threads()
619 size_t n = 0; in thread_init_thread_core_local() local
632 for (n = 0; n < thread_core_count; n++) { in thread_init_thread_core_local()
634 GET_STACK_BOTTOM(stack_tmp, n)); in thread_init_thread_core_local()
636 GET_STACK_BOTTOM(stack_abt, n)); in thread_init_thread_core_local()
640 for (n = 0; n < core_count; n++) { in thread_init_thread_core_local()
641 if (n == core_pos) { in thread_init_thread_core_local()
643 tcl[n] = thread_core_local[0]; in thread_init_thread_core_local()
647 tcl[n].curr_thread = THREAD_ID_INVALID; in thread_init_thread_core_local()
648 tcl[n].flags = THREAD_CLF_TMP; in thread_init_thread_core_local()
654 va = GET_STACK_BOTTOM(stack_tmp, n); in thread_init_thread_core_local()
655 tcl[n].tmp_stack_va_end = va - STACK_TMP_OFFS; in thread_init_thread_core_local()
657 tcl[n].tmp_stack_pa_end = in thread_init_thread_core_local()
658 vaddr_to_phys(tcl[n].tmp_stack_va_end); in thread_init_thread_core_local()
664 va = GET_STACK_BOTTOM(stack_abt, n); in thread_init_thread_core_local()
665 tcl[n].abt_stack_va_end = va; in thread_init_thread_core_local()
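thread_init_thread_core_local() above initializes one entry per core: the booting core reuses the already-populated entry 0, while the other cores get an invalid current thread, the TMP flag, and tmp/abt stack end addresses derived from the static stacks. A compact sketch of that loop follows; the core count, stack sizes, STACK_TMP_OFFS and flag values are assumptions.

/*
 * Sketch of the per-core setup loop suggested by the matches above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;

#define NUM_CORES		2			/* assumed count */
#define STACK_TMP_SIZE		2048			/* assumed size */
#define STACK_ABT_SIZE		2048			/* assumed size */
#define STACK_CANARY_SIZE	(8 * sizeof(uint32_t))	/* assumed size */
#define STACK_TMP_OFFS		64		/* assumed reserve at bottom */
#define THREAD_ID_INVALID	(-1)
#define THREAD_CLF_TMP		(1U << 0)

struct thread_core_local {
	vaddr_t tmp_stack_va_end;
	vaddr_t abt_stack_va_end;
	unsigned int flags;
	short int curr_thread;
};

static uint8_t stack_tmp[NUM_CORES][STACK_TMP_SIZE];
static uint8_t stack_abt[NUM_CORES][STACK_ABT_SIZE];
static struct thread_core_local tcl[NUM_CORES];

#define GET_STACK_BOTTOM(stack, n) ((vaddr_t)&(stack)[n] + \
				    sizeof((stack)[n]) - STACK_CANARY_SIZE / 2)

static void init_core_local(size_t core_pos)
{
	size_t n = 0;

	for (n = 0; n < NUM_CORES; n++) {
		if (n == core_pos) {
			/* The booting core keeps its current bookkeeping. */
			tcl[n] = tcl[0];
			continue;
		}
		tcl[n].curr_thread = THREAD_ID_INVALID;
		tcl[n].flags = THREAD_CLF_TMP;
		tcl[n].tmp_stack_va_end = GET_STACK_BOTTOM(stack_tmp, n) -
					  STACK_TMP_OFFS;
		tcl[n].abt_stack_va_end = GET_STACK_BOTTOM(stack_abt, n);
	}
}

int main(void)
{
	init_core_local(0);
	printf("core 1 tmp stack end: 0x%" PRIxPTR "\n",
	       tcl[1].tmp_stack_va_end);
	return 0;
}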
672 size_t n = 0; in thread_init_thread_pauth_keys() local
674 for (n = 0; n < thread_count; n++) in thread_init_thread_pauth_keys()
675 if (crypto_rng_read(&threads[n].keys, sizeof(threads[n].keys))) in thread_init_thread_pauth_keys()
682 size_t n = 0; in thread_init_core_local_pauth_keys() local
684 for (n = 0; n < thread_core_count; n++) in thread_init_core_local_pauth_keys()
685 if (crypto_rng_read(&tcl[n].keys, sizeof(tcl[n].keys))) in thread_init_core_local_pauth_keys()
699 assert(l->curr_thread != THREAD_ID_INVALID); in thread_get_ctx_regs()
700 return &threads[l->curr_thread].regs; in thread_get_ctx_regs()
711 assert(l->curr_thread != THREAD_ID_INVALID); in thread_set_foreign_intr()
714 threads[l->curr_thread].flags |= in thread_set_foreign_intr()
722 threads[l->curr_thread].flags &= in thread_set_foreign_intr()
735 assert(l->curr_thread != THREAD_ID_INVALID); in thread_restore_foreign_intr()
737 if (threads[l->curr_thread].flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE) in thread_restore_foreign_intr()
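thread_set_foreign_intr() and thread_restore_foreign_intr() above record the desired interrupt state as a flag on the current thread so it can be reinstated later. The sketch below shows that save/restore pattern with the hardware mask operation stubbed out; the flag value and the helper are assumptions.

/*
 * Sketch of the set/restore pattern suggested by the matches above: the
 * requested state is remembered on the thread, then replayed on restore.
 */
#include <stdbool.h>
#include <stdio.h>

#define THREAD_FLAGS_FOREIGN_INTR_ENABLE	(1U << 0)	/* assumed bit */

struct thread_ctx {
	unsigned int flags;
};

static struct thread_ctx current_thread;

/* Stand-in for the architecture-specific interrupt mask operation. */
static void foreign_intr_hw_enable(bool enable)
{
	printf("foreign interrupts %s\n", enable ? "enabled" : "disabled");
}

static void thread_set_foreign_intr(bool enable)
{
	if (enable)
		current_thread.flags |= THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	else
		current_thread.flags &= ~THREAD_FLAGS_FOREIGN_INTR_ENABLE;
	foreign_intr_hw_enable(enable);
}

static void thread_restore_foreign_intr(void)
{
	/* Re-enable only if the thread had asked for it before. */
	if (current_thread.flags & THREAD_FLAGS_FOREIGN_INTR_ENABLE)
		foreign_intr_hw_enable(true);
}

int main(void)
{
	thread_set_foreign_intr(true);
	thread_restore_foreign_intr();
	return 0;
}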
757 if (ce->mobj) { in clear_shm_cache_entry()
758 switch (ce->type) { in clear_shm_cache_entry()
760 thread_rpc_free_payload(ce->mobj); in clear_shm_cache_entry()
763 thread_rpc_free_kernel_payload(ce->mobj); in clear_shm_cache_entry()
766 thread_rpc_free_global_payload(ce->mobj); in clear_shm_cache_entry()
773 ce->mobj = NULL; in clear_shm_cache_entry()
774 ce->size = 0; in clear_shm_cache_entry()
784 if (ce->user == user) in get_shm_cache_entry()
789 ce->user = user; in get_shm_cache_entry()
818 if (ce->type != shm_type || sz > ce->size) { in thread_rpc_shm_cache_alloc()
821 ce->mobj = alloc_shm(shm_type, sz); in thread_rpc_shm_cache_alloc()
822 if (!ce->mobj) in thread_rpc_shm_cache_alloc()
825 if (mobj_get_pa(ce->mobj, 0, 0, &p)) in thread_rpc_shm_cache_alloc()
831 va = mobj_get_va(ce->mobj, 0, sz); in thread_rpc_shm_cache_alloc()
835 ce->size = sz; in thread_rpc_shm_cache_alloc()
836 ce->type = shm_type; in thread_rpc_shm_cache_alloc()
838 va = mobj_get_va(ce->mobj, 0, sz); in thread_rpc_shm_cache_alloc()
842 *mobj = ce->mobj; in thread_rpc_shm_cache_alloc()
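thread_rpc_shm_cache_alloc() above reuses a cached allocation when it is of the requested type and large enough, and otherwise frees it via clear_shm_cache_entry() and allocates a fresh one. The sketch below models that reuse-or-reallocate policy with plain malloc() in place of the RPC shared-memory objects (mobj); the enum values and struct layout are assumptions. The benefit of the pattern is that repeated requests for same-sized buffers of the same kind avoid a fresh allocation on every call.

/*
 * Sketch of the cache policy suggested by the matches above: keep one entry,
 * reuse it if type and size fit, otherwise drop it and allocate anew.
 */
#include <stdio.h>
#include <stdlib.h>

enum shm_type { SHM_APPL, SHM_KERNEL, SHM_GLOBAL };	/* assumed kinds */

struct shm_cache_entry {
	void *buf;
	size_t size;
	enum shm_type type;
};

static void clear_shm_cache_entry(struct shm_cache_entry *ce)
{
	/* The real code picks a type-specific free routine here. */
	free(ce->buf);
	ce->buf = NULL;
	ce->size = 0;
}

static void *shm_cache_alloc(struct shm_cache_entry *ce, enum shm_type type,
			     size_t sz)
{
	/* Reuse the cached buffer only if it is the right kind and big
	 * enough; otherwise drop it and allocate a fresh one. */
	if (!ce->buf || ce->type != type || sz > ce->size) {
		clear_shm_cache_entry(ce);
		ce->buf = calloc(1, sz);
		if (!ce->buf)
			return NULL;
		ce->size = sz;
		ce->type = type;
	}
	return ce->buf;
}

int main(void)
{
	struct shm_cache_entry ce = { 0 };
	void *p = shm_cache_alloc(&ce, SHM_APPL, 256);
	void *q = shm_cache_alloc(&ce, SHM_APPL, 128);	/* reused */

	printf("first %p, second %p (%s)\n", p, q,
	       p == q ? "cache hit" : "cache miss");
	clear_shm_cache_entry(&ce);
	return 0;
}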