xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/fpu/internal.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/fpu/xcr.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear_user_states(struct fpu *fpu);
extern void fpu__clear_all(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

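/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * WARN_ON_FPU() guards "should never happen" conditions in the FPU core,
 * e.g. an unexpected error from a register save/restore. Without
 * CONFIG_X86_DEBUG_FPU it evaluates its argument and yields 0 silently:
 *
 *	int err = some_fpu_restore_helper();	// hypothetical helper
 *	WARN_ON_FPU(err);			// warns once if err != 0
 */
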
/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

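/*
 * Illustrative sketch (editorial addition): callers use these helpers to
 * pick the most capable save/restore mechanism the CPU offers, in the same
 * way __copy_kernel_to_fpregs() below does for some union fpregs_state
 * pointer @fpstate:
 *
 *	if (use_xsave())
 *		copy_kernel_to_xregs(&fpstate->xsave, -1);
 *	else if (use_fxsr())
 *		copy_kernel_to_fxregs(&fpstate->fxsave);
 *	else
 *		copy_kernel_to_fregs(&fpstate->fsave);
 */
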
/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

/* Returns 0 or the negated trap number, which results in -EFAULT for #PF */
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
									\
	might_fault();							\
									\
	asm volatile(ASM_STAC "\n"					\
		     "1: " #insn "\n"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  negl %%eax\n"					\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_FAULT(1b, 3b)				\
		     : [err] "=a" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

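/*
 * Illustrative sketch (editorial addition): user_insn() runs a single FPU
 * instruction against a user-space buffer between STAC/CLAC and turns a
 * faulting access into a negated trap number, so callers only check the
 * return value (@buf is a hypothetical struct fxregs_state __user pointer):
 *
 *	err = user_insn(fxsave %[fx], [fx] "=m" (*buf), "m" (*buf));
 *	if (err)		// e.g. -EFAULT if the user page faulted
 *		return err;
 */
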
#define kernel_insn_err(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
		     : output : input)

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
{
	return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile("fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}

static inline void fxsave(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile("fxsave %[fx]" : [fx] "=m" (*fx));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

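/*
 * Editorial note (assumption, not from the original source): the x*state
 * instructions are spelled out as raw opcode bytes, which keeps the file
 * buildable with assemblers that do not know the XSAVES/XRSTORS mnemonics;
 * REX_PREFIX expands to the REX.W prefix on x86-64 (selecting the 64-bit
 * forms) and to nothing on 32-bit builds.
 */
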
/*
 * After this @err contains 0 on success or the negated trap number when
 * the operation raises an exception. For faults this results in -EFAULT.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: negl %%eax\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE_FAULT(1b, 3b)				\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

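/*
 * Illustrative sketch (editorial addition): XSTATE_OP() takes the xsave
 * area in %rdi/%edi and the 64-bit feature mask split across %eax/%edx
 * (lmask/hmask), and leaves 0 or a negated trap number in @err, as in
 * copy_kernel_to_xregs_err() below:
 *
 *	u64 mask = XFEATURE_MASK_FPSSE;		// restore FP+SSE only
 *	u32 lmask = mask, hmask = mask >> 32;
 *	int err;
 *
 *	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 *	// err == 0 on success, e.g. -EFAULT if the access faulted
 */
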
/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports the compact
 * format and supervisor states in addition to the modified optimization of
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports the modified optimization, which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction where we might get an exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * This function is called only during boot time, when the x86 caps are not
 * set up yet and alternatives cannot be used.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/*
	 * We should never fault when copying from a kernel buffer, and the FPU
	 * state we set at boot time should be valid.
	 */
	WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = xfeatures_mask_all;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}

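/*
 * Illustrative sketch (editorial addition): @mask selects which xfeature
 * components to restore, so a hypothetical caller can reinitialize only
 * the user-visible state while leaving the rest untouched, e.g.:
 *
 *	// reset user xstate to the kernel's init values
 *	copy_kernel_to_xregs(&init_fpstate.xsave, xfeatures_mask_user());
 */
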
/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format of the xsave area, for backward
 * compatibility with old applications which don't understand the
 * compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	u64 mask = xfeatures_mask_user();
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Restore xstate from kernel space xsave area, return an error code instead of
 * an exception.
 */
static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	return err;
}

extern int copy_fpregs_to_fpstate(struct fpu *fpu);

static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The "m" operand can be any kernel variable that is likely to be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate, -1);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * matches the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}

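/*
 * Illustrative sketch (editorial addition): the ownership rule above means
 * a restore can be skipped only when both conditions hold, and code that is
 * about to clobber the FPU on this CPU must drop the claim first. A
 * hypothetical caller running with preemption disabled might do:
 *
 *	__cpu_invalidate_fpregs_state();	// this CPU's regs no longer
 *						// belong to any task
 *	// ... use the FPU for kernel work ...
 *
 * while code that rewrites a sleeping task's in-memory state would call
 * __fpu_invalidate_fpregs_state(&tsk->thread.fpu) instead.
 */
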
/*
 * These generally need preemption protection to work;
 * try to avoid using them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}

/*
 * Internal helper, do not use directly. Use switch_fpu_return() instead.
 */
static inline void __fpregs_load_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;
	int cpu = smp_processor_id();

	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
		return;

	if (!fpregs_state_valid(fpu, cpu)) {
		copy_kernel_to_fpregs(&fpu->state);
		fpregs_activate(fpu);
		fpu->last_cpu = cpu;
	}
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
 *    will get loaded on return to userspace, or when the kernel needs it.
 *
 * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
 * hold the current thread's FPU register state.
 *
 * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
 * hold current()'s FPU registers. They must be loaded before the
 * kernel uses the FPU state or before returning to userland.
 *
 * The FPU context is only stored/restored for a user task and
 * PF_KTHREAD is used to distinguish between kernel and user threads.
 */
static inline void switch_fpu_prepare(struct task_struct *prev, int cpu)
{
	struct fpu *old_fpu = &prev->thread.fpu;

	if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		trace_x86_fpu_regs_deactivated(old_fpu);
	}
}

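/*
 * Illustrative sketch (editorial addition): the two stages described above
 * bracket the actual task switch, roughly the way the arch context-switch
 * code uses them (prev_p/next_p being the outgoing and incoming tasks):
 *
 *	switch_fpu_prepare(prev_p, cpu);	// save prev's live FPU regs
 *	// ... switch stacks/registers to the next task ...
 *	switch_fpu_finish(next_p);		// defer the FPU load via
 *						// TIF_NEED_FPU_LOAD, load PKRU
 */
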
/*
 * Misc helper functions:
 */

/*
 * Load PKRU from the FPU context if available. Delay loading of the
 * complete FPU state until the return to userland.
 */
static inline void switch_fpu_finish(struct task_struct *next)
{
	u32 pkru_val = init_pkru_value;
	struct pkru_state *pk;
	struct fpu *next_fpu = &next->thread.fpu;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	set_thread_flag(TIF_NEED_FPU_LOAD);

	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	/*
	 * PKRU state is switched eagerly because it needs to be valid before we
	 * return to userland e.g. for a copy_to_user() operation.
	 */
	if (!(next->flags & PF_KTHREAD)) {
		/*
		 * If the PKRU bit in xsave.header.xfeatures is not set,
		 * then the PKRU component was in init state, which means
		 * XRSTOR will set PKRU to 0. If the bit is not set then
		 * get_xsave_addr() will return NULL because the PKRU value
		 * in memory is not valid. This means pkru_val has to be
		 * set to 0 and not to init_pkru_value.
		 */
		pk = get_xsave_addr(&next_fpu->state.xsave, XFEATURE_PKRU);
		pkru_val = pk ? pk->pkru : 0;
	}
	__write_pkru(pkru_val);
}

#endif /* _ASM_X86_FPU_INTERNAL_H */