/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SWITCH_TO_H
#define __ASM_SWITCH_TO_H

#include <linux/thread_info.h>
#include <asm/fpu/api.h>
#include <asm/ptrace.h>
#include <asm/guarded_storage.h>

extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);

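/*
 * Store access registers 0-15 into the buffer at @acrs using the STAM
 * instruction. The buffer must provide room for NUM_ACRS 32-bit values.
 */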
static inline void save_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
}

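/*
 * Load access registers 0-15 from the buffer at @acrs using the LAM
 * instruction; the counterpart to save_access_regs().
 */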
static inline void restore_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
}

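/*
 * switch_to() saves the per-task state that is not handled by
 * __switch_to() itself: the floating point / vector registers (lazily,
 * via save_fpu_regs() and the CIF_FPU flag), the access registers, the
 * runtime instrumentation control block and the guarded storage control
 * block. It then updates the control registers for the next task,
 * restores that task's state and calls __switch_to(), which performs
 * the low-level task switch and returns the previously running task.
 */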
#define switch_to(prev, next, last) do {				\
	/* save_fpu_regs() sets the CIF_FPU flag, which enforces	\
	 * a restore of the floating point / vector registers as	\
	 * soon as the next task returns to user space			\
	 */								\
	save_fpu_regs();						\
	save_access_regs(&prev->thread.acrs[0]);			\
	save_ri_cb(prev->thread.ri_cb);					\
	save_gs_cb(prev->thread.gs_cb);					\
	update_cr_regs(next);						\
	restore_access_regs(&next->thread.acrs[0]);			\
	restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);		\
	restore_gs_cb(next->thread.gs_cb);				\
	prev = __switch_to(prev, next);					\
} while (0)

#endif /* __ASM_SWITCH_TO_H */