xref: /OK3568_Linux_fs/kernel/arch/mips/include/asm/fpu.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2002 MontaVista Software Inc.
4*4882a593Smuzhiyun  * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun #ifndef _ASM_FPU_H
7*4882a593Smuzhiyun #define _ASM_FPU_H
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/sched.h>
10*4882a593Smuzhiyun #include <linux/sched/task_stack.h>
11*4882a593Smuzhiyun #include <linux/ptrace.h>
12*4882a593Smuzhiyun #include <linux/thread_info.h>
13*4882a593Smuzhiyun #include <linux/bitops.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include <asm/mipsregs.h>
16*4882a593Smuzhiyun #include <asm/cpu.h>
17*4882a593Smuzhiyun #include <asm/cpu-features.h>
18*4882a593Smuzhiyun #include <asm/fpu_emulator.h>
19*4882a593Smuzhiyun #include <asm/hazards.h>
20*4882a593Smuzhiyun #include <asm/ptrace.h>
21*4882a593Smuzhiyun #include <asm/processor.h>
22*4882a593Smuzhiyun #include <asm/current.h>
23*4882a593Smuzhiyun #include <asm/msa.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #ifdef CONFIG_MIPS_MT_FPAFF
26*4882a593Smuzhiyun #include <asm/mips_mt.h>
27*4882a593Smuzhiyun #endif
28*4882a593Smuzhiyun 
/*
 * This enum specifies a mode in which we want the FPU to operate, for cores
 * which implement the Status.FR bit. Note that the bottom bit of the value
 * purposefully matches the desired value of the Status.FR bit.
 */
enum fpu_mode {
	FPU_32BIT = 0,		/* FR = 0 */
	FPU_64BIT,		/* FR = 1, FRE = 0 */
	FPU_AS_IS,		/* leave FR in whatever state it is in */
	FPU_HYBRID,		/* FR = 1, FRE = 1 */

/* Extracts the desired Status.FR value from an enum fpu_mode value */
#define FPU_FR_MASK		0x1
};
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #ifdef CONFIG_MIPS_FP_SUPPORT
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun extern void _save_fp(struct task_struct *);
46*4882a593Smuzhiyun extern void _restore_fp(struct task_struct *);
47*4882a593Smuzhiyun 
/*
 * Disable the FPU by clearing Status.CU1, then wait out the resulting
 * CP0 hazard before any subsequent instruction can observe the change.
 */
#define __disable_fpu()							\
do {									\
	clear_c0_status(ST0_CU1);					\
	disable_fpu_hazard();						\
} while (0)
53*4882a593Smuzhiyun 
/*
 * Enable the FPU in the requested mode.
 *
 * Returns 0 on success, or SIGFPE when the hardware cannot provide the
 * requested register model (no FRE for FPU_HYBRID, or Status.FR refuses
 * to take the required value). On that failure path the FPU is left
 * disabled again.
 */
static inline int __enable_fpu(enum fpu_mode mode)
{
	int fr;

	switch (mode) {
	case FPU_AS_IS:
		/* just enable the FPU in its current mode */
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();
		return 0;

	case FPU_HYBRID:
		if (!cpu_has_fre)
			return SIGFPE;

		/* set FRE */
		set_c0_config5(MIPS_CONF5_FRE);
		goto fr_common;

	case FPU_64BIT:
#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
      defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_64BIT))
		/* we only have a 32-bit FPU */
		return SIGFPE;
#endif
		fallthrough;
	case FPU_32BIT:
		if (cpu_has_fre) {
			/* clear FRE */
			clear_c0_config5(MIPS_CONF5_FRE);
		}
fr_common:
		/*
		 * Set CU1 & change FR appropriately. The bottom bit of the
		 * mode value is, by construction, the desired FR value.
		 */
		fr = (int)mode & FPU_FR_MASK;
		change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0));
		enable_fpu_hazard();

		/* check FR has the desired value */
		if (!!(read_c0_status() & ST0_FR) == !!fr)
			return 0;

		/* unsupported FR value; back out and report failure */
		__disable_fpu();
		return SIGFPE;

	default:
		BUG();
	}

	/* not reached, but keeps the compiler happy */
	return SIGFPE;
}
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun #define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)
107*4882a593Smuzhiyun 
/* Does the current task own the live FPU context (TIF_USEDFPU set)? */
static inline int __is_fpu_owner(void)
{
	return test_thread_flag(TIF_USEDFPU);
}
112*4882a593Smuzhiyun 
/* Non-zero iff FP hardware exists and the current task owns its context. */
static inline int is_fpu_owner(void)
{
	return cpu_has_fpu && __is_fpu_owner();
}
117*4882a593Smuzhiyun 
/*
 * Claim the hardware FPU for the current task: choose the register model
 * from the task's thread flags, enable the FPU in that mode, mirror the
 * resulting CU1/FR bits into the task's saved Status word, and set
 * TIF_USEDFPU. Callers run with preemption disabled (see own_fpu()).
 *
 * Returns 0 on success, or the signal number from __enable_fpu().
 */
static inline int __own_fpu(void)
{
	enum fpu_mode mode;
	int ret;

	if (test_thread_flag(TIF_HYBRID_FPREGS))
		mode = FPU_HYBRID;
	else
		/* relies on FPU_32BIT == 0 and FPU_64BIT == 1 */
		mode = !test_thread_flag(TIF_32BIT_FPREGS);

	ret = __enable_fpu(mode);
	if (ret)
		return ret;

	/* keep the saved Status register consistent with the live one */
	KSTK_STATUS(current) |= ST0_CU1;
	if (mode == FPU_64BIT || mode == FPU_HYBRID)
		KSTK_STATUS(current) |= ST0_FR;
	else /* mode == FPU_32BIT */
		KSTK_STATUS(current) &= ~ST0_FR;

	set_thread_flag(TIF_USEDFPU);
	return 0;
}
141*4882a593Smuzhiyun 
own_fpu_inatomic(int restore)142*4882a593Smuzhiyun static inline int own_fpu_inatomic(int restore)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun 	int ret = 0;
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	if (cpu_has_fpu && !__is_fpu_owner()) {
147*4882a593Smuzhiyun 		ret = __own_fpu();
148*4882a593Smuzhiyun 		if (restore && !ret)
149*4882a593Smuzhiyun 			_restore_fp(current);
150*4882a593Smuzhiyun 	}
151*4882a593Smuzhiyun 	return ret;
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun 
/*
 * Preemption-safe wrapper around own_fpu_inatomic(): claim the FPU for
 * the current task, optionally restoring its saved FP context.
 */
static inline int own_fpu(int restore)
{
	int ret;

	preempt_disable();
	ret = own_fpu_inatomic(restore);
	preempt_enable();
	return ret;
}
163*4882a593Smuzhiyun 
/*
 * Release the FPU/MSA unit on behalf of @tsk. When @save is non-zero the
 * live register state is written back to the task's thread struct before
 * the unit is disabled. The task's saved Status.CU1 bit and TIF_USEDFPU
 * flag are cleared unconditionally. Callers run with preemption disabled
 * (see lose_fpu()).
 */
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	if (is_msa_enabled()) {
		if (save) {
			save_msa(tsk);
			/* also capture the FP control/status word (fcr31) */
			tsk->thread.fpu.fcr31 =
					read_32bit_cp1_register(CP1_STATUS);
		}
		disable_msa();
		clear_tsk_thread_flag(tsk, TIF_USEDMSA);
		__disable_fpu();
	} else if (is_fpu_owner()) {
		if (save)
			_save_fp(tsk);
		__disable_fpu();
	} else {
		/* FPU should not have been left enabled with no owner */
		WARN(read_c0_status() & ST0_CU1,
		     "Orphaned FPU left enabled");
	}
	KSTK_STATUS(tsk) &= ~ST0_CU1;
	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
}
187*4882a593Smuzhiyun 
/*
 * Preemption-safe wrapper: release the current task's FPU/MSA ownership,
 * optionally saving the live register state first.
 */
static inline void lose_fpu(int save)
{
	preempt_disable();
	lose_fpu_inatomic(save, current);
	preempt_enable();
}
194*4882a593Smuzhiyun 
/**
 * init_fp_ctx() - Initialize task FP context
 * @target: The task whose FP context should be initialized.
 *
 * Initializes the FP context of the target task to sane default values if that
 * target task does not already have valid FP context. Once the context has
 * been initialized, the task will be marked as having used FP & thus having
 * valid FP context.
 *
 * Returns: true if context is initialized, else false.
 */
static inline bool init_fp_ctx(struct task_struct *target)
{
	/* If FP has been used then the target already has context */
	if (tsk_used_math(target))
		return false;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));

	/* FCSR has been preset by `mips_set_personality_nan'.  */

	/*
	 * Record that the target has "used" math, such that the context
	 * just initialised, and any modifications made by the caller,
	 * aren't discarded.
	 */
	set_stopped_child_used_math(target);

	return true;
}
226*4882a593Smuzhiyun 
/* Save @tsk's FP register state, but only if FP hardware is present. */
static inline void save_fp(struct task_struct *tsk)
{
	if (cpu_has_fpu)
		_save_fp(tsk);
}
232*4882a593Smuzhiyun 
/* Restore @tsk's FP register state, but only if FP hardware is present. */
static inline void restore_fp(struct task_struct *tsk)
{
	if (cpu_has_fpu)
		_restore_fp(tsk);
}
238*4882a593Smuzhiyun 
/*
 * Return a pointer to @tsk's saved FP registers. If @tsk is the current
 * task and currently owns the live FPU, flush the hardware registers to
 * memory first so the returned context is up to date.
 */
static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
	if (tsk == current) {
		preempt_disable();
		if (is_fpu_owner())
			_save_fp(current);
		preempt_enable();
	}

	return tsk->thread.fpu.fpr;
}
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun #else /* !CONFIG_MIPS_FP_SUPPORT */
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun /*
254*4882a593Smuzhiyun  * When FP support is disabled we provide only a minimal set of stub functions
255*4882a593Smuzhiyun  * to avoid callers needing to care too much about CONFIG_MIPS_FP_SUPPORT.
256*4882a593Smuzhiyun  */
257*4882a593Smuzhiyun 
/* Stub: with FP support compiled out, enabling the FPU yields SIGILL. */
static inline int __enable_fpu(enum fpu_mode mode)
{
	return SIGILL;
}
262*4882a593Smuzhiyun 
/* Stub: nothing to disable when FP support is compiled out. */
static inline void __disable_fpu(void)
{
	/* no-op */
}
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 
/* Stub: no task can own an FPU when FP support is compiled out. */
static inline int is_fpu_owner(void)
{
	return 0;
}
273*4882a593Smuzhiyun 
/* Stub: no FPU ownership to clear when FP support is compiled out. */
static inline void clear_fpu_owner(void)
{
	/* no-op */
}
278*4882a593Smuzhiyun 
/* Stub: claiming the FPU always fails with SIGILL when FP is compiled out. */
static inline int own_fpu_inatomic(int restore)
{
	return SIGILL;
}
283*4882a593Smuzhiyun 
/* Stub: claiming the FPU always fails with SIGILL when FP is compiled out. */
static inline int own_fpu(int restore)
{
	return SIGILL;
}
288*4882a593Smuzhiyun 
/* Stub: no FPU state to release when FP support is compiled out. */
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	/* no-op */
}
293*4882a593Smuzhiyun 
/* Stub: no FPU state to release when FP support is compiled out. */
static inline void lose_fpu(int save)
{
	/* no-op */
}
298*4882a593Smuzhiyun 
/* Stub: no FP context exists to initialise when FP is compiled out. */
static inline bool init_fp_ctx(struct task_struct *target)
{
	return false;
}
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun /*
305*4882a593Smuzhiyun  * The following functions should only be called in paths where we know that FP
306*4882a593Smuzhiyun  * support is enabled, typically a path where own_fpu() or __enable_fpu() have
307*4882a593Smuzhiyun  * returned successfully. When CONFIG_MIPS_FP_SUPPORT=n it is known at compile
308*4882a593Smuzhiyun  * time that this should never happen, so calls to these functions should be
309*4882a593Smuzhiyun  * optimized away & never actually be emitted.
310*4882a593Smuzhiyun  */
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun extern void save_fp(struct task_struct *tsk)
313*4882a593Smuzhiyun 	__compiletime_error("save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun extern void _save_fp(struct task_struct *)
316*4882a593Smuzhiyun 	__compiletime_error("_save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun extern void restore_fp(struct task_struct *tsk)
319*4882a593Smuzhiyun 	__compiletime_error("restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun extern void _restore_fp(struct task_struct *)
322*4882a593Smuzhiyun 	__compiletime_error("_restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun extern union fpureg *get_fpu_regs(struct task_struct *tsk)
325*4882a593Smuzhiyun 	__compiletime_error("get_fpu_regs() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun #endif /* !CONFIG_MIPS_FP_SUPPORT */
328*4882a593Smuzhiyun #endif /* _ASM_FPU_H */
329