xref: /OK3568_Linux_fs/kernel/arch/nds32/include/asm/fpu.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /* Copyright (C) 2005-2018 Andes Technology Corporation */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #ifndef __ASM_NDS32_FPU_H
5*4882a593Smuzhiyun #define __ASM_NDS32_FPU_H
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_FPU)
8*4882a593Smuzhiyun #ifndef __ASSEMBLY__
9*4882a593Smuzhiyun #include <linux/sched/task_stack.h>
10*4882a593Smuzhiyun #include <linux/preempt.h>
11*4882a593Smuzhiyun #include <asm/ptrace.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun extern bool has_fpu;
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun extern void save_fpu(struct task_struct *__tsk);
16*4882a593Smuzhiyun extern void load_fpu(const struct fpu_struct *fpregs);
17*4882a593Smuzhiyun extern bool do_fpu_exception(unsigned int subtype, struct pt_regs *regs);
18*4882a593Smuzhiyun extern int do_fpuemu(struct pt_regs *regs, struct fpu_struct *fpu);
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define test_tsk_fpu(regs)	(regs->fucop_ctl & FUCOP_CTL_mskCP0EN)
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun /*
23*4882a593Smuzhiyun  * Initially load the FPU with signalling NANS.  This bit pattern
24*4882a593Smuzhiyun  * has the property that no matter whether considered as single or as
25*4882a593Smuzhiyun  * double precision, it still represents a signalling NAN.
26*4882a593Smuzhiyun  */
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #define sNAN64    0xFFFFFFFFFFFFFFFFULL
29*4882a593Smuzhiyun #define sNAN32    0xFFFFFFFFUL
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun  * Denormalized number is unsupported by nds32 FPU. Hence the operation
34*4882a593Smuzhiyun  * is treated as underflow cases when the final result is a denormalized
35*4882a593Smuzhiyun  * number. To enhance precision, underflow exception trap should be
36*4882a593Smuzhiyun  * enabled by default and the kernel will re-execute it via the FPU emulator
37*4882a593Smuzhiyun  * when getting underflow exception.
38*4882a593Smuzhiyun  */
39*4882a593Smuzhiyun #define FPCSR_INIT  (FPCSR_mskUDFE | FPCSR_mskIEXE)
40*4882a593Smuzhiyun #else
41*4882a593Smuzhiyun #define FPCSR_INIT  0x0UL
42*4882a593Smuzhiyun #endif
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun extern const struct fpu_struct init_fpuregs;
45*4882a593Smuzhiyun 
disable_ptreg_fpu(struct pt_regs * regs)46*4882a593Smuzhiyun static inline void disable_ptreg_fpu(struct pt_regs *regs)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun 	regs->fucop_ctl &= ~FUCOP_CTL_mskCP0EN;
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun 
enable_ptreg_fpu(struct pt_regs * regs)51*4882a593Smuzhiyun static inline void enable_ptreg_fpu(struct pt_regs *regs)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun 	regs->fucop_ctl |= FUCOP_CTL_mskCP0EN;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun 
enable_fpu(void)56*4882a593Smuzhiyun static inline void enable_fpu(void)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun 	unsigned long fucop_ctl;
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 	fucop_ctl = __nds32__mfsr(NDS32_SR_FUCOP_CTL) | FUCOP_CTL_mskCP0EN;
61*4882a593Smuzhiyun 	__nds32__mtsr(fucop_ctl, NDS32_SR_FUCOP_CTL);
62*4882a593Smuzhiyun 	__nds32__isb();
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun 
disable_fpu(void)65*4882a593Smuzhiyun static inline void disable_fpu(void)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun 	unsigned long fucop_ctl;
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	fucop_ctl = __nds32__mfsr(NDS32_SR_FUCOP_CTL) & ~FUCOP_CTL_mskCP0EN;
70*4882a593Smuzhiyun 	__nds32__mtsr(fucop_ctl, NDS32_SR_FUCOP_CTL);
71*4882a593Smuzhiyun 	__nds32__isb();
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun 
lose_fpu(void)74*4882a593Smuzhiyun static inline void lose_fpu(void)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun 	preempt_disable();
77*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_LAZY_FPU)
78*4882a593Smuzhiyun 	if (last_task_used_math == current) {
79*4882a593Smuzhiyun 		last_task_used_math = NULL;
80*4882a593Smuzhiyun #else
81*4882a593Smuzhiyun 	if (test_tsk_fpu(task_pt_regs(current))) {
82*4882a593Smuzhiyun #endif
83*4882a593Smuzhiyun 		save_fpu(current);
84*4882a593Smuzhiyun 	}
85*4882a593Smuzhiyun 	disable_ptreg_fpu(task_pt_regs(current));
86*4882a593Smuzhiyun 	preempt_enable();
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun static inline void own_fpu(void)
90*4882a593Smuzhiyun {
91*4882a593Smuzhiyun 	preempt_disable();
92*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_LAZY_FPU)
93*4882a593Smuzhiyun 	if (last_task_used_math != current) {
94*4882a593Smuzhiyun 		if (last_task_used_math != NULL)
95*4882a593Smuzhiyun 			save_fpu(last_task_used_math);
96*4882a593Smuzhiyun 		load_fpu(&current->thread.fpu);
97*4882a593Smuzhiyun 		last_task_used_math = current;
98*4882a593Smuzhiyun 	}
99*4882a593Smuzhiyun #else
100*4882a593Smuzhiyun 	if (!test_tsk_fpu(task_pt_regs(current))) {
101*4882a593Smuzhiyun 		load_fpu(&current->thread.fpu);
102*4882a593Smuzhiyun 	}
103*4882a593Smuzhiyun #endif
104*4882a593Smuzhiyun 	enable_ptreg_fpu(task_pt_regs(current));
105*4882a593Smuzhiyun 	preempt_enable();
106*4882a593Smuzhiyun }
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun #if !IS_ENABLED(CONFIG_LAZY_FPU)
/*
 * Flush tsk's live FPU state to its thread struct without releasing
 * ownership.  Only meaningful in eager (non-lazy) FPU mode.
 */
static inline void unlazy_fpu(struct task_struct *tsk)
{
	struct pt_regs *regs;

	preempt_disable();
	regs = task_pt_regs(tsk);
	if (test_tsk_fpu(regs))
		save_fpu(tsk);
	preempt_enable();
}
116*4882a593Smuzhiyun #endif /* !CONFIG_LAZY_FPU */
/*
 * Drop FPU ownership recorded in a saved user register context without
 * saving the hardware registers; the task will trap and refault its
 * FPU state on next use.
 */
static inline void clear_fpu(struct pt_regs *regs)
{
	preempt_disable();
	if (test_tsk_fpu(regs))
		disable_ptreg_fpu(regs);
	preempt_enable();
}
124*4882a593Smuzhiyun #endif /* CONFIG_FPU */
125*4882a593Smuzhiyun #endif /* __ASSEMBLY__ */
126*4882a593Smuzhiyun #endif /* __ASM_NDS32_FPU_H */
127