Lines Matching refs:thr

in thread_lazy_save_ns_vfp():
    125  struct thread_ctx *thr = threads + thread_get_id();   (local)
    127  thr->vfp_state.ns_saved = false;
    128  vfp_lazy_save_state_init(&thr->vfp_state.ns);

in thread_lazy_restore_ns_vfp():
    135  struct thread_ctx *thr = threads + thread_get_id();   (local)
    136  struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
    138  assert(!thr->vfp_state.sec_lazy_saved && !thr->vfp_state.sec_saved);
    145  vfp_lazy_restore_state(&thr->vfp_state.ns, thr->vfp_state.ns_saved);
    146  thr->vfp_state.ns_saved = false;
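Taken together with the later matches, the lines above reference every vfp_state field that appears in this listing. The sketch below gathers those fields in one place; the field names are copied from the matches, while the vfp_state and thread_user_vfp_state layouts are placeholders, not the real definitions.

#include <stdbool.h>
#include <stdint.h>

/* Placeholder register-bank snapshot; the real layout is target specific. */
struct vfp_state {
	uint64_t regs[32];
	uint32_t fpscr;
};

/* Placeholder user-mode VFP context, bound via thr->vfp_state.uvfp. */
struct thread_user_vfp_state {
	struct vfp_state vfp;
	bool lazy_saved;
	bool saved;
};

/* Per-thread VFP bookkeeping implied by the matched references. */
struct thread_vfp_state {
	bool ns_saved;          /* non-secure bank written out? (lines 127, 146) */
	bool sec_saved;         /* secure bank written out?     (lines 799, 847) */
	bool sec_lazy_saved;    /* secure lazy save begun?      (lines 834, 848) */
	struct vfp_state ns;    /* normal world's VFP registers */
	struct vfp_state sec;   /* secure kernel's VFP registers */
	struct thread_user_vfp_state *uvfp;  /* bound user context, if any (line 877) */
};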
in release_unused_kernel_stack():
    473  static void release_unused_kernel_stack(struct thread_ctx *thr,   (argument)
    483  vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
    485  vaddr_t sp = thr->regs.svc_sp;
    487  vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
    493  static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
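Two release_unused_kernel_stack() declarations match: line 473 takes thr as a live argument, while line 493 marks it __unused, presumably a stub for builds where nothing is released. The remaining matches show the address arithmetic: the thread stack descends from stack_va_end, so the span between the stack base and the live stack pointer is unused. A minimal sketch of that arithmetic, with a placeholder STACK_THREAD_SIZE and a hypothetical release_pages() helper (the actual release call is not visible in the matches):

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t vaddr_t;        /* stand-in for the kernel's vaddr_t */
#define STACK_THREAD_SIZE 8192    /* placeholder; the real size is configuration dependent */

/* Hypothetical hook standing in for whatever actually releases the pages. */
static void release_pages(vaddr_t base, size_t len)
{
	printf("would release %zu bytes at 0x%lx\n", len, (unsigned long)base);
}

/*
 * Lines 483-487: the stack descends from stack_va_end, so everything
 * between the stack base and the current stack pointer is unused.
 */
static void release_unused(vaddr_t stack_va_end, vaddr_t sp)
{
	vaddr_t base = stack_va_end - STACK_THREAD_SIZE;

	if (sp > base)
		release_pages(base, sp - base);
}

The saved stack pointer fed in would be kern_sp or regs.sp on one branch (line 483) and regs.svc_sp on the other (line 485).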
in thread_kernel_enable_vfp():
    782  struct thread_ctx *thr = threads + thread_get_id();   (local)
    783  struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
    787  if (!thr->vfp_state.ns_saved) {
    788  vfp_lazy_save_state_final(&thr->vfp_state.ns,
    790  thr->vfp_state.ns_saved = true;
    791  } else if (thr->vfp_state.sec_lazy_saved &&
    792  !thr->vfp_state.sec_saved) {
    797  vfp_lazy_save_state_final(&thr->vfp_state.sec,
    799  thr->vfp_state.sec_saved = true;
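Only the lines touching thr appear above, which obscures the branch structure of thread_kernel_enable_vfp(). Reassembled, the rule is: before the secure kernel gets the VFP unit, complete whichever deferred save still owns the register bank — the normal world's bank if it was never written out, otherwise a secure bank whose lazy save was started but not finished. A sketch of that rule follows; the helper name finish_pending_vfp_save() and the one-argument vfp_lazy_save_state_final() stand-in are mine (the real call's trailing arguments are cut off at lines 788 and 797).

#include <stdbool.h>

struct vfp_state { int dummy; };   /* placeholder register bank */

/* Stand-in: the real call takes extra arguments not visible in the matches. */
static void vfp_lazy_save_state_final(struct vfp_state *s) { (void)s; }

struct thread_vfp_state {
	bool ns_saved, sec_saved, sec_lazy_saved;
	struct vfp_state ns, sec;
};

/*
 * Lines 787-799 reassembled: before the secure kernel uses VFP, finish
 * whichever deferred save still owns the register bank.
 */
static void finish_pending_vfp_save(struct thread_vfp_state *vs)
{
	if (!vs->ns_saved) {
		/* Normal world's bank was never written out: dump it now. */
		vfp_lazy_save_state_final(&vs->ns);
		vs->ns_saved = true;
	} else if (vs->sec_lazy_saved && !vs->sec_saved) {
		/* A secure lazy save was begun but not finished: finish it. */
		vfp_lazy_save_state_final(&vs->sec);
		vs->sec_saved = true;
	}
}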
in thread_kernel_save_vfp():
    829  struct thread_ctx *thr = threads + thread_get_id();   (local)
    833  vfp_lazy_save_state_init(&thr->vfp_state.sec);
    834  thr->vfp_state.sec_lazy_saved = true;

in thread_kernel_restore_vfp():
    840  struct thread_ctx *thr = threads + thread_get_id();   (local)
    844  if (thr->vfp_state.sec_lazy_saved) {
    845  vfp_lazy_restore_state(&thr->vfp_state.sec,
    846  thr->vfp_state.sec_saved);
    847  thr->vfp_state.sec_saved = false;
    848  thr->vfp_state.sec_lazy_saved = false;
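thread_kernel_save_vfp() and thread_kernel_restore_vfp() bracket secure-kernel use of the VFP unit. The save side only starts a lazy save and raises sec_lazy_saved (lines 833-834); the costly register dump is deferred to thread_kernel_enable_vfp() and happens only if VFP is actually re-enabled. The restore side then reloads the secure bank only if that deferred save was completed (lines 844-848). A self-contained sketch of the pairing, with stand-in vfp_lazy_* primitives modelled on the call names (the real prototypes may differ):

#include <stdbool.h>

struct vfp_state { int dummy; };   /* placeholder register bank */

/* Stand-ins for the lazy-save primitives named in the listing. */
static void vfp_lazy_save_state_init(struct vfp_state *s) { (void)s; }
static void vfp_lazy_restore_state(struct vfp_state *s, bool saved)
{
	(void)s;
	(void)saved;   /* reload the registers only if 'saved' is true */
}

struct thread_vfp_state {
	bool sec_saved;
	bool sec_lazy_saved;
	struct vfp_state sec;
};

/* Lines 833-834: mark the secure bank for deferred saving. */
static void kernel_save_vfp(struct thread_vfp_state *vs)
{
	vfp_lazy_save_state_init(&vs->sec);
	vs->sec_lazy_saved = true;
}

/* Lines 844-848: undo the lazy save, reloading only what was really dumped. */
static void kernel_restore_vfp(struct thread_vfp_state *vs)
{
	if (vs->sec_lazy_saved) {
		vfp_lazy_restore_state(&vs->sec, vs->sec_saved);
		vs->sec_saved = false;
		vs->sec_lazy_saved = false;
	}
}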
in thread_user_enable_vfp():
    854  struct thread_ctx *thr = threads + thread_get_id();   (local)
    855  struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;
    860  if (!thr->vfp_state.ns_saved) {
    861  vfp_lazy_save_state_final(&thr->vfp_state.ns,
    863  thr->vfp_state.ns_saved = true;
    877  thr->vfp_state.uvfp = uvfp;

in thread_user_save_vfp():
    883  struct thread_ctx *thr = threads + thread_get_id();   (local)
    884  struct thread_user_vfp_state *tuv = thr->vfp_state.uvfp;

in thread_user_clear_vfp():
    898  struct thread_ctx *thr = threads + thread_get_id();   (local)
    900  if (uvfp == thr->vfp_state.uvfp)
    901  thr->vfp_state.uvfp = NULL;
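The thread_user_*_vfp() fragments mostly repeat the save-finalizing pattern above; the new piece is the uvfp binding. A user-mode VFP context is attached at enable time (line 877) and detached only if it is still the context the current thread points at (lines 900-901), so clearing a stale or foreign context is a no-op. A minimal sketch of that ownership rule, with thread_vfp_state trimmed to the single field involved:

#include <stddef.h>

struct thread_user_vfp_state { int dummy; };   /* placeholder layout */

struct thread_vfp_state {
	struct thread_user_vfp_state *uvfp;    /* currently bound user context */
};

/* Line 877: bind the caller's user VFP context to the running thread. */
static void user_bind_vfp(struct thread_vfp_state *vs,
			  struct thread_user_vfp_state *uvfp)
{
	vs->uvfp = uvfp;
}

/* Lines 900-901: unbind only if this context is still the bound one. */
static void user_clear_vfp(struct thread_vfp_state *vs,
			   struct thread_user_vfp_state *uvfp)
{
	if (uvfp == vs->uvfp)
		vs->uvfp = NULL;
}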