xref: /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/switch_to.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #ifndef _ASM_POWERPC_SWITCH_TO_H
6*4882a593Smuzhiyun #define _ASM_POWERPC_SWITCH_TO_H
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/sched.h>
9*4882a593Smuzhiyun #include <asm/reg.h>
10*4882a593Smuzhiyun 
/* Forward declarations: only pointers to these are used here, so the full
 * definitions need not be pulled in. */
struct thread_struct;
struct task_struct;
struct pt_regs;

/* Low-level task switch; returns the task that was previously running. */
extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
/* Generic switch_to() hook: 'last' receives the task we switched away from. */
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

/* Inner register-state switch between two thread_structs. */
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

/* Load a new set of BookE debug registers on context switch.
 * NOTE(review): struct debug_reg is declared elsewhere (asm/reg.h family). */
extern void switch_booke_debug_regs(struct debug_reg *new_debug);

/* Emulate an AltiVec instruction that trapped; returns an emulation status.
 * NOTE(review): exact return convention defined at the implementation. */
extern int emulate_altivec(struct pt_regs *);
25*4882a593Smuzhiyun 
#ifdef CONFIG_PPC_BOOK3S_64
void restore_math(struct pt_regs *regs);
#else
/* Only Book3S-64 implements restore_math(); everywhere else it is a no-op. */
static inline void restore_math(struct pt_regs *regs)
{
}
#endif
33*4882a593Smuzhiyun 
/* Restore transactional-memory state for the task returning to userspace. */
void restore_tm_state(struct pt_regs *regs);

/* Flush every live register-file state (FP/VMX/VSX/SPE as configured) of a
 * task back into its thread_struct, or give the state up entirely. */
extern void flush_all_to_thread(struct task_struct *);
extern void giveup_all(struct task_struct *);
38*4882a593Smuzhiyun 
#ifdef CONFIG_PPC_FPU
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
extern void save_fpu(struct task_struct *);
/* Counterpart to enable_kernel_fp(): clear MSR_FP again. */
static inline void disable_kernel_fp(void)
{
	msr_check_and_clear(MSR_FP);
}
#else
/* No FPU configured: saving/flushing FP state is a no-op. */
static inline void save_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
#endif
52*4882a593Smuzhiyun 
#ifdef CONFIG_ALTIVEC
extern void enable_kernel_altivec(void);
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void save_altivec(struct task_struct *);
/* Counterpart to enable_kernel_altivec(): clear MSR_VEC again. */
static inline void disable_kernel_altivec(void)
{
	msr_check_and_clear(MSR_VEC);
}
#else
/* No AltiVec configured: these degenerate to no-ops. */
static inline void save_altivec(struct task_struct *t) { }
static inline void __giveup_altivec(struct task_struct *t) { }
#endif
66*4882a593Smuzhiyun 
#ifdef CONFIG_VSX
extern void enable_kernel_vsx(void);
extern void flush_vsx_to_thread(struct task_struct *);
/* Clears MSR_FP and MSR_VEC along with MSR_VSX — VSX is not disabled in
 * isolation (presumably because VSX shares the FP/VMX register files;
 * see the Power ISA). */
static inline void disable_kernel_vsx(void)
{
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
#else
/* Without CONFIG_VSX, any caller reaching these is a bug caught at
 * compile time via BUILD_BUG(). */
static inline void enable_kernel_vsx(void)
{
	BUILD_BUG();
}

static inline void disable_kernel_vsx(void)
{
	BUILD_BUG();
}
#endif
85*4882a593Smuzhiyun 
#ifdef CONFIG_SPE
extern void enable_kernel_spe(void);
extern void flush_spe_to_thread(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void __giveup_spe(struct task_struct *);
/* Counterpart to enable_kernel_spe(): clear MSR_SPE again. */
static inline void disable_kernel_spe(void)
{
	msr_check_and_clear(MSR_SPE);
}
#else
/* No SPE configured: giving up SPE state is a no-op. */
static inline void __giveup_spe(struct task_struct *t) { }
#endif
98*4882a593Smuzhiyun 
/*
 * Reset the cached Event-Based Branching (EBB) and related PMU SPR state
 * held in a task's thread_struct.  Per the original in-body comment, EBB
 * perf events are not inherited, so all EBB state is zeroed.
 *
 * Fix: the body used 4-space indentation, inconsistent with the rest of
 * this file (and kernel coding style); re-indented with tabs.  Logic is
 * unchanged.
 */
static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/* EBB perf events are not inherited, so clear all EBB state. */
	t->thread.ebbrr = 0;
	t->thread.ebbhr = 0;
	t->thread.bescr = 0;
	t->thread.mmcr2 = 0;
	t->thread.mmcr0 = 0;
	t->thread.siar = 0;
	t->thread.sdar = 0;
	t->thread.sier = 0;
	t->thread.used_ebb = 0;
#endif
}
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun extern int set_thread_tidr(struct task_struct *t);
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun #endif /* _ASM_POWERPC_SWITCH_TO_H */
118