/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/* Make the addition of cfi info a little easier. */
	.macro cfi_rel_offset reg offset=0 docfi=0
	.if \docfi
	.cfi_rel_offset \reg, \offset
	.endif
	.endm

	.macro cfi_st reg offset=0 docfi=0
	LONG_S	\reg, \offset(sp)
	cfi_rel_offset \reg, \offset, \docfi
	.endm

	.macro cfi_restore reg offset=0 docfi=0
	.if \docfi
	.cfi_restore \reg
	.endif
	.endm

	.macro cfi_ld reg offset=0 docfi=0
	LONG_L	\reg, \offset(sp)
	cfi_restore \reg \offset \docfi
	.endm

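/*
 * STATMASK covers the CP0 Status bits describing the privilege/interrupt
 * state: the six KUo/IEo/KUp/IEp/KUc/IEc stack bits (0x3f) on R3000-style
 * cores, or KSU/ERL/EXL/IE (0x1f) on R4000-style cores.
 */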
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

		.macro	SAVE_AT docfi=0
		.set	push
		.set	noat
		cfi_st	$1, PT_R1, \docfi
		.set	pop
		.endm

		.macro	SAVE_TEMP docfi=0
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
		mfhi	v1
#endif
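		/*
		 * On non-SmartMIPS, pre-R6 cores the mfhi/mflo reads and
		 * their PT_HI/PT_LO stores are interleaved with the GPR
		 * saves, presumably to hide the HI/LO access latency.
		 */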
#ifdef CONFIG_32BIT
		cfi_st	$8, PT_R8, \docfi
		cfi_st	$9, PT_R9, \docfi
#endif
		cfi_st	$10, PT_R10, \docfi
		cfi_st	$11, PT_R11, \docfi
		cfi_st	$12, PT_R12, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		cfi_st	$13, PT_R13, \docfi
		cfi_st	$14, PT_R14, \docfi
		cfi_st	$15, PT_R15, \docfi
		cfi_st	$24, PT_R24, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions. It must be saved before kernel
		 * code might corrupt it.
		 */
		jal     octeon_mult_save
#endif
		.endm

		.macro	SAVE_STATIC docfi=0
		cfi_st	$16, PT_R16, \docfi
		cfi_st	$17, PT_R17, \docfi
		cfi_st	$18, PT_R18, \docfi
		cfi_st	$19, PT_R19, \docfi
		cfi_st	$20, PT_R20, \docfi
		cfi_st	$21, PT_R21, \docfi
		cfi_st	$22, PT_R22, \docfi
		cfi_st	$23, PT_R23, \docfi
		cfi_st	$30, PT_R30, \docfi
		.endm

/*
 * get_saved_sp returns the SP for the current CPU by looking in the
 * kernelsp array for it.  If tosp is set, it stores the current sp in
 * k0 and loads the new value in sp.  If not, it clobbers k0 and
 * stores the new value in k1, leaving sp unaffected.
 */
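/*
 * In the SMP case this amounts to "sp (or k1) = kernelsp[cpu]": the CPU
 * id read from ASM_SMP_CPUID_REG is turned into a byte offset into the
 * kernelsp[] pointer array via SMP_CPUID_PTRSHIFT.  The uniprocessor
 * variant simply loads the kernelsp variable directly.
 */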
#ifdef CONFIG_SMP

		/* SMP variation */
		.macro	get_saved_sp docfi=0 tosp=0
		ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, SMP_CPUID_PTRSHIFT
		LONG_ADDU	k1, k0
		.if \tosp
		move	k0, sp
		.if \docfi
		.cfi_register sp, k0
		.endif
		LONG_L	sp, %lo(kernelsp)(k1)
		.else
		LONG_L	k1, %lo(kernelsp)(k1)
		.endif
		.endm

		.macro	set_saved_sp stackp temp temp2
		ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
		LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else /* !CONFIG_SMP */
		/* Uniprocessor variation */
		.macro	get_saved_sp docfi=0 tosp=0
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
		/*
		 * Clear the BTB (branch target buffer) and forbid the RAS
		 * (return address stack) to work around the out-of-order
		 * issue in Loongson2F via its diagnostic register.
		 */
		move	k0, ra
		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		move	ra, k0
		li	k0, 3
		mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		.if \tosp
		move	k0, sp
		.if \docfi
		.cfi_register sp, k0
		.endif
		LONG_L	sp, %lo(kernelsp)(k1)
		.else
		LONG_L	k1, %lo(kernelsp)(k1)
		.endif
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif

		.macro	SAVE_SOME docfi=0
		.set	push
		.set	noat
		.set	reorder
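		/*
		 * Status.CU0 doubles as the "running on a kernel stack" flag
		 * (see CLI/STI/KMODE below).  The sll by 3 moves CU0 (bit 28)
		 * into the sign bit so bltz can test it: if it is set, the
		 * exception came from kernel mode and sp already points at a
		 * kernel stack.
		 */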
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k0, sp
		.if \docfi
		.cfi_register sp, k0
		.endif
#ifdef CONFIG_EVA
		/*
		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
		 * EntryHi. Toggling Config7.RPS is slower and less portable.
		 *
		 * The RPS isn't automatically flushed when exceptions are
		 * taken, which can result in kernel mode speculative accesses
		 * to user addresses if the RPS mispredicts. That's harmless
		 * when user and kernel share the same address space, but with
		 * EVA the same user segments may be unmapped to kernel mode,
		 * even containing sensitive MMIO regions or invalid memory.
		 *
		 * This can happen when the kernel sets the return address to
		 * ret_from_* and jr's to the exception handler, which looks
		 * more like a tail call than a function call. If nested calls
		 * don't evict the last user address in the RPS, it will
		 * mispredict the return and fetch from a user controlled
		 * address into the icache.
		 *
		 * More recent EVA-capable cores with MAAR to restrict
		 * speculative accesses aren't affected.
		 */
		MFC0	k0, CP0_ENTRYHI
		MTC0	k0, CP0_ENTRYHI
#endif
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp docfi=\docfi tosp=1
8:
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
		.set	at=k1
#endif
		PTR_SUBU sp, PT_SIZE
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
		.set	noat
#endif
		.if \docfi
		.cfi_def_cfa sp,0
		.endif
		cfi_st	k0, PT_R29, \docfi
		cfi_rel_offset  sp, PT_R29, \docfi
		cfi_st	v1, PT_R3, \docfi
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly.
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		cfi_st	v0, PT_R2, \docfi
		LONG_S	v1, PT_STATUS(sp)
		cfi_st	$4, PT_R4, \docfi
		mfc0	v1, CP0_CAUSE
		cfi_st	$5, PT_R5, \docfi
		LONG_S	v1, PT_CAUSE(sp)
		cfi_st	$6, PT_R6, \docfi
		cfi_st	ra, PT_R31, \docfi
		MFC0	ra, CP0_EPC
		cfi_st	$7, PT_R7, \docfi
#ifdef CONFIG_64BIT
		cfi_st	$8, PT_R8, \docfi
		cfi_st	$9, PT_R9, \docfi
#endif
		LONG_S	ra, PT_EPC(sp)
		.if \docfi
		.cfi_rel_offset ra, PT_EPC
		.endif
		cfi_st	$25, PT_R25, \docfi
		cfi_st	$28, PT_R28, \docfi

		/* Set thread_info if we're coming from user mode */
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		bltz	k0, 9f

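		/*
		 * gp ($28) = sp & ~_THREAD_MASK: the ori/xori pair rounds sp
		 * down to the THREAD_SIZE-aligned stack base where struct
		 * thread_info lives, without needing a register to hold the
		 * inverted mask.
		 */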
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set    mips64
		pref    0, 0($28)       /* Prefetch the current pointer */
#endif
9:
		.set	pop
		.endm

		.macro	SAVE_ALL docfi=0
		SAVE_SOME \docfi
		SAVE_AT \docfi
		SAVE_TEMP \docfi
		SAVE_STATIC \docfi
		.endm

		.macro	RESTORE_AT docfi=0
		.set	push
		.set	noat
		cfi_ld	$1, PT_R1, \docfi
		.set	pop
		.endm

		.macro	RESTORE_TEMP docfi=0
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		cfi_ld	$8, PT_R8, \docfi
		cfi_ld	$9, PT_R9, \docfi
#endif
		cfi_ld	$10, PT_R10, \docfi
		cfi_ld	$11, PT_R11, \docfi
		cfi_ld	$12, PT_R12, \docfi
		cfi_ld	$13, PT_R13, \docfi
		cfi_ld	$14, PT_R14, \docfi
		cfi_ld	$15, PT_R15, \docfi
		cfi_ld	$24, PT_R24, \docfi
		.endm

		.macro	RESTORE_STATIC docfi=0
		cfi_ld	$16, PT_R16, \docfi
		cfi_ld	$17, PT_R17, \docfi
		cfi_ld	$18, PT_R18, \docfi
		cfi_ld	$19, PT_R19, \docfi
		cfi_ld	$20, PT_R20, \docfi
		cfi_ld	$21, PT_R21, \docfi
		cfi_ld	$22, PT_R22, \docfi
		cfi_ld	$23, PT_R23, \docfi
		cfi_ld	$30, PT_R30, \docfi
		.endm

		.macro	RESTORE_SP docfi=0
		cfi_ld	sp, PT_R29, \docfi
		.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME docfi=0
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
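		/*
		 * Keep the live CU1 and interrupt-mask bits (they may have
		 * changed while in the kernel); take everything else from
		 * the saved Status value.
		 */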
		li	v1, ST0_CU1 | ST0_IM
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		cfi_ld	$31, PT_R31, \docfi
		cfi_ld	$28, PT_R28, \docfi
		cfi_ld	$25, PT_R25, \docfi
		cfi_ld	$7,  PT_R7, \docfi
		cfi_ld	$6,  PT_R6, \docfi
		cfi_ld	$5,  PT_R5, \docfi
		cfi_ld	$4,  PT_R4, \docfi
		cfi_ld	$3,  PT_R3, \docfi
		cfi_ld	$2,  PT_R2, \docfi
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET docfi=0
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		RESTORE_SP \docfi
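		/*
		 * rfe executes in the jr delay slot, so the previous KU/IE
		 * state is restored as we jump back to the saved EPC in k0.
		 */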
		jr	k0
		 rfe
		.set	pop
		.endm

#else
		.macro	RESTORE_SOME docfi=0
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
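		/*
		 * Rebuild Status from the saved value, but keep the live
		 * CU1, FR and interrupt-mask bits: FPU ownership/mode and
		 * the IRQ mask may have changed while we were in the kernel.
		 */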
		li	v1, ST0_CU1 | ST0_FR | ST0_IM
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		cfi_ld	$31, PT_R31, \docfi
		cfi_ld	$28, PT_R28, \docfi
		cfi_ld	$25, PT_R25, \docfi
#ifdef CONFIG_64BIT
		cfi_ld	$8, PT_R8, \docfi
		cfi_ld	$9, PT_R9, \docfi
#endif
		cfi_ld	$7,  PT_R7, \docfi
		cfi_ld	$6,  PT_R6, \docfi
		cfi_ld	$5,  PT_R5, \docfi
		cfi_ld	$4,  PT_R4, \docfi
		cfi_ld	$3,  PT_R3, \docfi
		cfi_ld	$2,  PT_R2, \docfi
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET docfi=0
		RESTORE_SP \docfi
#if defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
		eretnc
#else
		.set	push
		.set	arch=r4000
		eret
		.set	pop
#endif
		.endm

#endif

		.macro	RESTORE_ALL docfi=0
		RESTORE_TEMP \docfi
		RESTORE_STATIC \docfi
		RESTORE_AT \docfi
		RESTORE_SOME \docfi
		RESTORE_SP \docfi
		.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	CLI
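		/*
		 * The or/xori pair forces every STATMASK bit to zero while
		 * leaving ST0_KERNEL_CUMASK set: kernel mode, interrupts off.
		 */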
		mfc0	t0, CP0_STATUS
		li	t1, ST0_KERNEL_CUMASK | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	STI
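		/*
		 * Same trick as CLI, but STATMASK & ~1 leaves bit 0 (IE) set,
		 * so we end up in kernel mode with interrupts enabled.
		 */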
		mfc0	t0, CP0_STATUS
		li	t1, ST0_KERNEL_CUMASK | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note that
 * for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	KMODE
		mfc0	t0, CP0_STATUS
		li	t1, ST0_KERNEL_CUMASK | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */