Lines matching refs:threads (cross-reference hits on the threads[] thread-context array)
125 struct thread_ctx *thr = threads + thread_get_id(); in thread_lazy_save_ns_vfp()
135 struct thread_ctx *thr = threads + thread_get_id(); in thread_lazy_restore_ns_vfp()
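The two lazy-VFP lines above share one idiom: the per-thread context is found by adding the current thread id to the base of a fixed-size threads[] table. A minimal sketch of that lookup follows, assuming a CFG_NUM_THREADS-sized table and a thread_get_id() that returns the index of the thread running on this core (both names appear in the listing); the ns_vfp_saved field and the stub body of thread_get_id() are illustrative, not the real layout.

#include <stdbool.h>

#define CFG_NUM_THREADS 8          /* assumed build-time constant */

struct thread_ctx {
	bool ns_vfp_saved;         /* illustrative lazy-VFP flag */
	/* ... remaining per-thread state elided ... */
};

static struct thread_ctx threads[CFG_NUM_THREADS];

/* Stand-in for the real per-core lookup; the id is assumed to be
 * assigned at allocation time and to stay below CFG_NUM_THREADS. */
static int thread_get_id(void)
{
	return 0;
}

static void lazy_save_ns_vfp_sketch(void)
{
	/* Same pointer arithmetic as the listing: table base + index. */
	struct thread_ctx *thr = threads + thread_get_id();

	/* Lazy handling: only flag that the non-secure VFP state must be
	 * saved; the actual register dump is deferred until first use. */
	thr->ns_vfp_saved = false;
}

The same base-plus-index lookup reappears in every VFP helper at the end of the listing; the remaining groups index threads[] directly by the slot number n or the current thread id ct.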
233 if (threads[n].state == THREAD_STATE_FREE) { in __thread_alloc_and_run()
234 threads[n].state = THREAD_STATE_ACTIVE; in __thread_alloc_and_run()
247 threads[n].flags = flags; in __thread_alloc_and_run()
248 init_regs(threads + n, a0, a1, a2, a3, a4, a5, a6, a7, pc); in __thread_alloc_and_run()
254 threads[n].regs.apiakey_hi = threads[n].keys.apia_hi; in __thread_alloc_and_run()
255 threads[n].regs.apiakey_lo = threads[n].keys.apia_lo; in __thread_alloc_and_run()
261 thread_resume(&threads[n].regs); in __thread_alloc_and_run()
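The __thread_alloc_and_run() lines above show the allocation path: find a FREE slot, flip it to ACTIVE, seed its register file (including the per-thread pointer-authentication key pair), and enter it via thread_resume(). Below is a compressed sketch of that flow under the same three-state lifecycle; init_regs() and thread_resume() are names from the listing but their bodies here are stand-ins, the a[8]/pc parameter shape is a simplification of a0..a7, and the struct layouts are illustrative.

#include <stdbool.h>
#include <stdint.h>

#define CFG_NUM_THREADS 8

enum thread_state { THREAD_STATE_FREE, THREAD_STATE_ACTIVE, THREAD_STATE_SUSPENDED };

struct thread_ctx_regs {
	uint64_t x[8];          /* a0..a7 argument registers, simplified */
	uint64_t pc;
	uint64_t apiakey_hi;    /* PAC instruction key, as in the listing */
	uint64_t apiakey_lo;
};

struct thread_ctx {
	enum thread_state state;
	uint32_t flags;
	struct thread_ctx_regs regs;
	struct { uint64_t apia_hi, apia_lo; } keys;
};

static struct thread_ctx threads[CFG_NUM_THREADS];

static void init_regs(struct thread_ctx *thr, const uint64_t a[8], uint64_t pc)
{
	for (int i = 0; i < 8; i++)
		thr->regs.x[i] = a[i];
	thr->regs.pc = pc;
}

/* Stand-in for the assembly routine that loads the saved registers
 * and drops into the thread. */
static void thread_resume(struct thread_ctx_regs *regs)
{
	(void)regs;
}

static bool alloc_and_run_sketch(const uint64_t a[8], uint64_t pc, uint32_t flags)
{
	for (int n = 0; n < CFG_NUM_THREADS; n++) {
		if (threads[n].state != THREAD_STATE_FREE)
			continue;       /* slot busy, keep scanning */

		/* Claim the slot (the listing does not show it, but a scan
		 * like this would normally run under a lock). */
		threads[n].state = THREAD_STATE_ACTIVE;
		threads[n].flags = flags;
		init_regs(&threads[n], a, pc);

		/* Load the per-thread PAC key into the register block so it
		 * is live once the thread starts executing. */
		threads[n].regs.apiakey_hi = threads[n].keys.apia_hi;
		threads[n].regs.apiakey_lo = threads[n].keys.apia_lo;

		thread_resume(&threads[n].regs);
		return true;    /* not reached once thread_resume() is real */
	}
	return false;           /* no free slot: caller reports "busy" */
}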
373 if (n < CFG_NUM_THREADS && threads[n].state == THREAD_STATE_SUSPENDED) { in thread_resume_from_rpc()
374 threads[n].state = THREAD_STATE_ACTIVE; in thread_resume_from_rpc()
385 if (threads[n].have_user_map) { in thread_resume_from_rpc()
386 core_mmu_set_user_map(&threads[n].user_map); in thread_resume_from_rpc()
387 if (threads[n].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR) in thread_resume_from_rpc()
391 if (is_user_mode(&threads[n].regs)) in thread_resume_from_rpc()
398 if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) { in thread_resume_from_rpc()
399 copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3); in thread_resume_from_rpc()
400 threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN; in thread_resume_from_rpc()
405 if (threads[n].have_user_map) in thread_resume_from_rpc()
409 thread_resume(&threads[n].regs); in thread_resume_from_rpc()
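The thread_resume_from_rpc() lines above mirror the allocation path in reverse: validate the slot, require it to be SUSPENDED, mark it ACTIVE, re-install the user mapping that was live at suspend time, and hand the RPC results back through the first argument registers exactly once. The sketch below keeps only that skeleton, under assumed flag bit values and an illustrative register/user-map layout; copy_a0_to_a3(), core_mmu_set_user_map() and thread_resume() are names from the listing with stub bodies here, and the foreign-interrupt and user-mode handling visible in the listing is omitted.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define CFG_NUM_THREADS 8
#define THREAD_FLAGS_COPY_ARGS_ON_RETURN  (1U << 0)   /* bit values assumed */
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR (1U << 1)

enum thread_state { THREAD_STATE_FREE, THREAD_STATE_ACTIVE, THREAD_STATE_SUSPENDED };

struct user_map { uint64_t ttbr; };                    /* illustrative */
struct thread_ctx_regs { uint64_t x[31]; uint64_t pc; uint32_t cpsr; };

struct thread_ctx {
	enum thread_state state;
	uint32_t flags;
	bool have_user_map;
	struct user_map user_map;
	struct thread_ctx_regs regs;
};

static struct thread_ctx threads[CFG_NUM_THREADS];

static void core_mmu_set_user_map(struct user_map *m) { (void)m; }
static void thread_resume(struct thread_ctx_regs *r) { (void)r; }

static void copy_a0_to_a3(struct thread_ctx_regs *r,
			  uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3)
{
	/* RPC results travel back in the first four argument registers. */
	r->x[0] = a0; r->x[1] = a1; r->x[2] = a2; r->x[3] = a3;
}

static bool resume_from_rpc_sketch(size_t n, uint64_t a0, uint64_t a1,
				   uint64_t a2, uint64_t a3)
{
	/* Only an in-range, suspended thread may be resumed. */
	if (n >= CFG_NUM_THREADS || threads[n].state != THREAD_STATE_SUSPENDED)
		return false;
	threads[n].state = THREAD_STATE_ACTIVE;

	/* Re-install the user-space mapping recorded at suspend time. */
	if (threads[n].have_user_map)
		core_mmu_set_user_map(&threads[n].user_map);

	/* Hand the RPC return values to the interrupted context once. */
	if (threads[n].flags & THREAD_FLAGS_COPY_ARGS_ON_RETURN) {
		copy_a0_to_a3(&threads[n].regs, a0, a1, a2, a3);
		threads[n].flags &= ~THREAD_FLAGS_COPY_ARGS_ON_RETURN;
	}

	thread_resume(&threads[n].regs);
	return true;
}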
437 return threads[ct].kern_sp; in thread_get_saved_thread_sp()
457 (void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE), in thread_state_free()
462 assert(threads[ct].state == THREAD_STATE_ACTIVE); in thread_state_free()
463 threads[ct].state = THREAD_STATE_FREE; in thread_state_free()
464 threads[ct].flags = 0; in thread_state_free()
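The thread_state_free() lines above close the lifecycle: the thread stack base is recovered from its end (stack_va_end minus STACK_THREAD_SIZE, both names from the listing), the slot is asserted to be ACTIVE, then returned to FREE with its flags cleared. A minimal sketch, where recycle_stack() is a hypothetical consumer for the address the listing computes and the stack size is an assumed value:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CFG_NUM_THREADS 8
#define STACK_THREAD_SIZE 8192        /* assumed stack size */

enum thread_state { THREAD_STATE_FREE, THREAD_STATE_ACTIVE, THREAD_STATE_SUSPENDED };

struct thread_ctx {
	enum thread_state state;
	uint32_t flags;
	uintptr_t stack_va_end;       /* top of the thread's kernel stack */
};

static struct thread_ctx threads[CFG_NUM_THREADS];

/* Hypothetical hook for whatever reuse or scrubbing the real code applies. */
static void recycle_stack(void *base, size_t size) { (void)base; (void)size; }

static void state_free_sketch(int ct)
{
	/* The stack base is derived from its end, as in the listing. */
	recycle_stack((void *)(threads[ct].stack_va_end - STACK_THREAD_SIZE),
		      STACK_THREAD_SIZE);

	/* Only a running thread may free itself; the slot is then reusable
	 * by the allocation scan shown earlier. */
	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].state = THREAD_STATE_FREE;
	threads[ct].flags = 0;
}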
511 release_unused_kernel_stack(threads + ct, cpsr); in thread_state_suspend()
522 assert(threads[ct].state == THREAD_STATE_ACTIVE); in thread_state_suspend()
523 threads[ct].flags |= flags; in thread_state_suspend()
524 threads[ct].regs.cpsr = cpsr; in thread_state_suspend()
525 threads[ct].regs.pc = pc; in thread_state_suspend()
526 threads[ct].state = THREAD_STATE_SUSPENDED; in thread_state_suspend()
528 threads[ct].have_user_map = core_mmu_user_mapping_is_active(); in thread_state_suspend()
529 if (threads[ct].have_user_map) { in thread_state_suspend()
530 if (threads[ct].flags & THREAD_FLAGS_EXIT_ON_FOREIGN_INTR) in thread_state_suspend()
532 core_mmu_get_user_map(&threads[ct].user_map); in thread_state_suspend()
538 TAILQ_FIRST(&threads[ct].tsd.sess_stack); in thread_state_suspend()
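The thread_state_suspend() lines above are the counterpart of the resume path: the caller's flags are ORed in, the return cpsr/pc are recorded in the saved register block, the slot flips to SUSPENDED, and the user mapping is snapshotted if one is active so the resume path can restore it. The sketch below keeps that core under illustrative struct layouts and an assumed flag bit; core_mmu_user_mapping_is_active() and core_mmu_get_user_map() are names from the listing with stub bodies here, and the kernel-stack release and session-stack bookkeeping visible in the listing are omitted.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CFG_NUM_THREADS 8
#define THREAD_FLAGS_EXIT_ON_FOREIGN_INTR (1U << 1)   /* bit value assumed */

enum thread_state { THREAD_STATE_FREE, THREAD_STATE_ACTIVE, THREAD_STATE_SUSPENDED };

struct user_map { uint64_t ttbr; };                    /* illustrative */
struct thread_ctx_regs { uint64_t pc; uint32_t cpsr; };

struct thread_ctx {
	enum thread_state state;
	uint32_t flags;
	bool have_user_map;
	struct user_map user_map;
	struct thread_ctx_regs regs;
};

static struct thread_ctx threads[CFG_NUM_THREADS];

static bool core_mmu_user_mapping_is_active(void) { return false; }
static void core_mmu_get_user_map(struct user_map *m) { m->ttbr = 0; }

static int state_suspend_sketch(int ct, uint32_t flags, uint32_t cpsr, uint64_t pc)
{
	/* Only an active thread can be suspended; remember where and in
	 * what processor state to resume it. */
	assert(threads[ct].state == THREAD_STATE_ACTIVE);
	threads[ct].flags |= flags;
	threads[ct].regs.cpsr = cpsr;
	threads[ct].regs.pc = pc;
	threads[ct].state = THREAD_STATE_SUSPENDED;

	/* Snapshot the user mapping so the resume path can reinstall it. */
	threads[ct].have_user_map = core_mmu_user_mapping_is_active();
	if (threads[ct].have_user_map)
		core_mmu_get_user_map(&threads[ct].user_map);

	return ct;      /* the slot number is what travels back over RPC */
}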
782 struct thread_ctx *thr = threads + thread_get_id(); in thread_kernel_enable_vfp()
829 struct thread_ctx *thr = threads + thread_get_id(); in thread_kernel_save_vfp()
840 struct thread_ctx *thr = threads + thread_get_id(); in thread_kernel_restore_vfp()
854 struct thread_ctx *thr = threads + thread_get_id(); in thread_user_enable_vfp()
883 struct thread_ctx *thr = threads + thread_get_id(); in thread_user_save_vfp()
898 struct thread_ctx *thr = threads + thread_get_id(); in thread_user_clear_vfp()
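The six VFP entries above all begin with the same threads + thread_get_id() lookup before touching kernel- or user-mode VFP state. The listing only shows that first line of each helper, so the sketch below is a hedged guess at the surrounding lazy save/restore shape: the flag fields and vfp_save_registers()/vfp_restore_registers() are purely illustrative, not the real bookkeeping.

#include <stdbool.h>

#define CFG_NUM_THREADS 8

/* Illustrative lazy-VFP bookkeeping; the real per-thread layout is richer. */
struct thread_vfp_state {
	bool kern_saved;        /* kernel-mode VFP registers written out? */
	bool user_saved;        /* user-mode VFP registers written out? */
};

struct thread_ctx {
	struct thread_vfp_state vfp;
};

static struct thread_ctx threads[CFG_NUM_THREADS];

static int thread_get_id(void) { return 0; }   /* stand-in */

/* Stand-ins for the actual register save/restore primitives. */
static void vfp_save_registers(void) {}
static void vfp_restore_registers(void) {}

static void kernel_save_vfp_sketch(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	/* Save only if this thread still owns live kernel VFP state. */
	if (!thr->vfp.kern_saved) {
		vfp_save_registers();
		thr->vfp.kern_saved = true;
	}
}

static void kernel_restore_vfp_sketch(void)
{
	struct thread_ctx *thr = threads + thread_get_id();

	/* Restore lazily and clear the flag so the next save is not skipped. */
	if (thr->vfp.kern_saved) {
		vfp_restore_registers();
		thr->vfp.kern_saved = false;
	}
}

Keeping the VFP flags inside the per-thread context, rather than in per-CPU state, is what lets a suspended thread migrate between cores and still find its own floating-point bookkeeping on resume.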