Searched refs:threads (Results 1 – 15 of 15) sorted by relevance
/optee_os/core/arch/riscv/kernel/
thread_arch.c
  242  if (threads[n].state == THREAD_STATE_FREE) {  in __thread_alloc_and_run()
  243  threads[n].state = THREAD_STATE_ACTIVE;  in __thread_alloc_and_run()
  256  threads[n].flags = 0;  in __thread_alloc_and_run()
  257  init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);  in __thread_alloc_and_run()
  263  thread_resume(&threads[n].regs);  in __thread_alloc_and_run()
  326  return threads[ct].kern_sp;  in thread_get_saved_thread_sp()
  347  if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {  in thread_resume_from_rpc()
  348  threads[n].state = THREAD_STATE_ACTIVE;  in thread_resume_from_rpc()
  359  if (threads[n].have_user_map) {  in thread_resume_from_rpc()
  360  core_mmu_set_user_map(&threads[n].user_map);  in thread_resume_from_rpc()
  [all …]
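The __thread_alloc_and_run() hits above show the allocation handshake: scan the fixed threads pool for a THREAD_STATE_FREE slot, claim it by marking it active, clear its flags, then seed its registers and resume it. A minimal compilable sketch of that pattern (simplified stand-in types; locking and the init_regs()/thread_resume() calls are only hinted at, and thread_alloc()/main() are illustrative names, not OP-TEE's):

    #include <stdio.h>

    #define CFG_NUM_THREADS 8   /* pool size is fixed at build time */

    enum thread_state {
        THREAD_STATE_FREE,
        THREAD_STATE_ACTIVE,
        THREAD_STATE_SUSPENDED,
    };

    struct thread_ctx {
        enum thread_state state;
        unsigned int flags;
    };

    static struct thread_ctx threads[CFG_NUM_THREADS];

    /* Claim the first free slot and mark it active (the real code does
     * this under a lock); return the index, or -1 when every thread is
     * busy, where OP-TEE instead reports a thread-limit status. */
    static int thread_alloc(void)
    {
        for (int n = 0; n < CFG_NUM_THREADS; n++) {
            if (threads[n].state == THREAD_STATE_FREE) {
                threads[n].state = THREAD_STATE_ACTIVE;
                threads[n].flags = 0;
                /* real code: init_regs(threads + n, a0..a7, pc);
                 *            thread_resume(&threads[n].regs); */
                return n;
            }
        }
        return -1;
    }

    int main(void)
    {
        printf("claimed slot %d\n", thread_alloc());
        return 0;
    }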
thread_optee_abi.c
  147  struct thread_ctx *thr = threads + thread_get_id();  in call_entry_std()
  314  if (threads[n].state != THREAD_STATE_FREE) {  in thread_disable_prealloc_rpc_cache()
  324  if (threads[n].rpc_arg) {  in thread_disable_prealloc_rpc_cache()
  325  *cookie = mobj_get_cookie(threads[n].rpc_mobj);  in thread_disable_prealloc_rpc_cache()
  326  mobj_put(threads[n].rpc_mobj);  in thread_disable_prealloc_rpc_cache()
  327  threads[n].rpc_arg = NULL;  in thread_disable_prealloc_rpc_cache()
  328  threads[n].rpc_mobj = NULL;  in thread_disable_prealloc_rpc_cache()
  355  if (threads[n].state != THREAD_STATE_FREE) {  in thread_enable_prealloc_rpc_cache()
  465  struct thread_ctx *thr = threads + thread_get_id();  in get_rpc_arg()
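The thread_disable_prealloc_rpc_cache() hits sketch a teardown protocol: refuse while any thread is non-free, otherwise release a thread's preallocated RPC argument buffer and hand its cookie back so the normal world can unmap it. A hedged reconstruction (the mobj layout, the one-cookie-per-call draining loop, and disable_prealloc_rpc_cache() itself are assumptions; only the field names and the mobj_get_cookie()/mobj_put() calls come from the hits):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define CFG_NUM_THREADS 8

    enum thread_state { THREAD_STATE_FREE, THREAD_STATE_ACTIVE };

    struct mobj { uint64_t cookie; int refs; };   /* stand-in */

    struct thread_ctx {
        enum thread_state state;
        void *rpc_arg;           /* preallocated RPC argument buffer */
        struct mobj *rpc_mobj;   /* memory object backing rpc_arg */
    };

    static struct thread_ctx threads[CFG_NUM_THREADS];

    /* Stubs for OP-TEE's mobj API */
    static uint64_t mobj_get_cookie(struct mobj *m) { return m->cookie; }
    static void mobj_put(struct mobj *m) { m->refs--; }

    /* Release one thread's cached RPC buffer per call and report its
     * cookie; *cookie stays 0 once nothing is left.  Fails while any
     * thread is still in use. */
    static bool disable_prealloc_rpc_cache(uint64_t *cookie)
    {
        *cookie = 0;
        for (size_t n = 0; n < CFG_NUM_THREADS; n++)
            if (threads[n].state != THREAD_STATE_FREE)
                return false;   /* can't tear down under a live thread */
        for (size_t n = 0; n < CFG_NUM_THREADS; n++) {
            if (threads[n].rpc_arg) {
                *cookie = mobj_get_cookie(threads[n].rpc_mobj);
                mobj_put(threads[n].rpc_mobj);
                threads[n].rpc_arg = NULL;
                threads[n].rpc_mobj = NULL;
                break;
            }
        }
        return true;
    }

    int main(void)
    {
        uint64_t cookie = 0;

        /* Caller drains the cache until no cookie comes back. */
        while (disable_prealloc_rpc_cache(&cookie) && cookie)
            ;
        return 0;
    }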
entry.S
  334  la a0, threads
thread_rv.S
  23  la \res, threads
/optee_os/core/arch/arm/kernel/
thread.c
  125  struct thread_ctx *thr = threads + thread_get_id();  in thread_lazy_save_ns_vfp()
  135  struct thread_ctx *thr = threads + thread_get_id();  in thread_lazy_restore_ns_vfp()
  233  if (threads[n].state == THREAD_STATE_FREE) {  in __thread_alloc_and_run()
  234  threads[n].state = THREAD_STATE_ACTIVE;  in __thread_alloc_and_run()
  247  threads[n].flags = flags;  in __thread_alloc_and_run()
  248  init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc);  in __thread_alloc_and_run()
  254  threads[n].regs.apiakey_hi = threads[n].keys.apia_hi;  in __thread_alloc_and_run()
  255  threads[n].regs.apiakey_lo = threads[n].keys.apia_lo;  in __thread_alloc_and_run()
  261  thread_resume(&threads[n].regs);  in __thread_alloc_and_run()
  373  if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) {  in thread_resume_from_rpc()
  [all …]
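The Arm thread.c mirrors the RISC-V allocation path (plus pointer-auth key setup), and thread_resume_from_rpc() is the reverse edge of the state machine: validate the resumed id against CFG_NUM_THREADS, require THREAD_STATE_SUSPENDED, flip the slot back to active, and (per the RISC-V hits) restore the thread's user-mode mapping if it had one. A simplified sketch; the user_map contents and the resume_from_rpc() wrapper are invented for illustration:

    #include <stdbool.h>
    #include <stddef.h>

    #define CFG_NUM_THREADS 8

    enum thread_state {
        THREAD_STATE_FREE,
        THREAD_STATE_ACTIVE,
        THREAD_STATE_SUSPENDED,
    };

    struct user_map { unsigned long asid; };   /* stand-in for the MMU map */

    struct thread_ctx {
        enum thread_state state;
        bool have_user_map;
        struct user_map user_map;
    };

    static struct thread_ctx threads[CFG_NUM_THREADS];

    static void core_mmu_set_user_map(struct user_map *map)
    {
        (void)map;   /* the real call reprograms the MMU; stubbed here */
    }

    /* Resume thread n after an RPC round trip: reject stale or
     * out-of-range ids, then reactivate and restore its user mapping. */
    static bool resume_from_rpc(size_t n)
    {
        if (n >= CFG_NUM_THREADS || threads[n].state != THREAD_STATE_SUSPENDED)
            return false;
        threads[n].state = THREAD_STATE_ACTIVE;
        if (threads[n].have_user_map)
            core_mmu_set_user_map(&threads[n].user_map);
        return true;
    }

    int main(void)
    {
        threads[3].state = THREAD_STATE_SUSPENDED;
        return resume_from_rpc(3) ? 0 : 1;
    }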
thread_optee_smc.c
  146  struct thread_ctx *thr = threads + thread_get_id();  in call_entry_std()
  313  if (threads[n].state != THREAD_STATE_FREE) {  in thread_disable_prealloc_rpc_cache()
  323  if (threads[n].rpc_arg) {  in thread_disable_prealloc_rpc_cache()
  324  *cookie = mobj_get_cookie(threads[n].rpc_mobj);  in thread_disable_prealloc_rpc_cache()
  325  mobj_put(threads[n].rpc_mobj);  in thread_disable_prealloc_rpc_cache()
  326  threads[n].rpc_arg = NULL;  in thread_disable_prealloc_rpc_cache()
  327  threads[n].rpc_mobj = NULL;  in thread_disable_prealloc_rpc_cache()
  354  if (threads[n].state != THREAD_STATE_FREE) {  in thread_enable_prealloc_rpc_cache()
  464  struct thread_ctx *thr = threads + thread_get_id();  in get_rpc_arg()
entry_a64.S
  548  adr_l x0, threads
  558  adr_l x0, threads
abort.c
  382  tc = threads + thread_get_id();  in handle_user_mode_panic()
entry_a32.S
  728  ldr r0, =threads
thread_a64.S
  20  adr_l x\res, threads
thread_spmc.c
  2038  struct thread_ctx *thr = threads + thread_get_id();  in yielding_call_with_arg()
  2139  struct thread_ctx *thr = threads + thread_get_id();  in get_rpc_arg()
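Several hits (call_entry_std(), get_rpc_arg(), handle_user_mode_panic(), yielding_call_with_arg(), the VFP lazy-save pair) repeat one idiom: threads + thread_get_id() turns the caller's current thread id into a pointer to its own context. Sketched below with a stubbed thread_get_id() and a placeholder struct body:

    #include <stdio.h>

    #define CFG_NUM_THREADS 8

    struct thread_ctx { int dummy; };   /* placeholder body */

    static struct thread_ctx threads[CFG_NUM_THREADS];

    /* Stub: the real thread_get_id() reads per-core bookkeeping and
     * asserts that a thread is actually running on this core. */
    static int thread_get_id(void)
    {
        return 0;
    }

    int main(void)
    {
        /* The idiom from the hits: index the pool by the caller's id
         * to reach per-thread state (RPC buffers, VFP context, ...). */
        struct thread_ctx *thr = threads + thread_get_id();

        printf("thread ctx at %p\n", (void *)thr);
        return 0;
    }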
/optee_os/core/kernel/
thread.c
  25   struct thread_ctx *threads;  variable
  33   struct thread_ctx *threads = __threads;  variable
  162  !IS_ENABLED(CFG_NS_VIRTUALIZATION) && threads) {  in thread_init_canaries()
  164  va = threads[n].stack_va_end;  in thread_init_canaries()
  237  va = threads[n].stack_va_end;  in thread_check_canaries()
  305  va = threads[n].stack_va_end;  in print_stack_limits()
  341  p = &threads[ct].tsd.stackcheck_recursion;  in get_stackcheck_recursion_flag()
  377  threads[0].state = THREAD_STATE_ACTIVE;  in thread_init_boot_thread()
  385  assert(threads[l->curr_thread].state == THREAD_STATE_ACTIVE);  in thread_clr_boot_thread()
  386  threads[l->curr_thread].state = THREAD_STATE_FREE;  in thread_clr_boot_thread()
  [all …]
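The generic thread.c hits revolve around per-thread stack bookkeeping: thread_init_canaries() and thread_check_canaries() both derive addresses from threads[n].stack_va_end. A toy model of the idea, assuming one canary word at the low end of each descending stack (OP-TEE's real scheme, the stack sizes, and the STACK_CANARY value here are simplifications):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define CFG_NUM_THREADS 2
    #define STACK_SIZE      1024
    #define STACK_CANARY    0xdededede

    /* One stack per thread; stack_va_end mirrors the field in the hits
     * and points one past the stack's highest address. */
    static uint32_t stacks[CFG_NUM_THREADS][STACK_SIZE / sizeof(uint32_t)];

    struct thread_ctx { uintptr_t stack_va_end; };

    static struct thread_ctx threads[CFG_NUM_THREADS];

    static void thread_init_canaries(void)
    {
        for (size_t n = 0; n < CFG_NUM_THREADS; n++) {
            stacks[n][0] = STACK_CANARY;   /* an overflow of a descending
                                              stack lands here first */
            threads[n].stack_va_end = (uintptr_t)stacks[n] + STACK_SIZE;
        }
    }

    static void thread_check_canaries(void)
    {
        for (size_t n = 0; n < CFG_NUM_THREADS; n++) {
            uintptr_t va = threads[n].stack_va_end;
            uint32_t *canary = (uint32_t *)(va - STACK_SIZE);

            assert(*canary == STACK_CANARY);   /* the real code panics */
        }
    }

    int main(void)
    {
        thread_init_canaries();
        thread_check_canaries();
        return 0;
    }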
/optee_os/core/include/kernel/
thread_private.h
  75  extern struct thread_ctx *threads;
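thread_private.h exports the pool that core/kernel/thread.c defines; the two definition hits at lines 25 and 33 above suggest an #ifdef'd pair, presumably a bare pointer when CFG_NS_VIRTUALIZATION selects the pool at runtime versus a pointer bound to a static backing array otherwise. The split, sketched as a single translation unit with a stand-in struct body:

    #define CFG_NUM_THREADS 8

    struct thread_ctx { int state; };   /* stand-in body */

    /* thread_private.h: declaration shared by every arch's thread code
     * and assembly entry points. */
    extern struct thread_ctx *threads;

    /* core/kernel/thread.c: the non-virtualization variant (assumed),
     * binding the pointer to a static backing array at build time. */
    static struct thread_ctx __threads[CFG_NUM_THREADS];
    struct thread_ctx *threads = __threads;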
/optee_os/core/arch/arm/plat-k3/drivers/
sec_proxy.c
  32  enum threads {  enum
/optee_os/lib/libmbedtls/mbedtls/
ChangeLog
  23  threads make progress in a multithreaded program could force software
  92  that could lead to using software AES in some threads at the very