xref: /OK3568_Linux_fs/kernel/arch/powerpc/kernel/head_booke.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __HEAD_BOOKE_H__
3*4882a593Smuzhiyun #define __HEAD_BOOKE_H__
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */
6*4882a593Smuzhiyun #include <asm/kvm_asm.h>
7*4882a593Smuzhiyun #include <asm/kvm_booke_hv_asm.h>
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #ifdef __ASSEMBLY__
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun /*
12*4882a593Smuzhiyun  * Macros used for common Book-e exception handling
13*4882a593Smuzhiyun  */
14*4882a593Smuzhiyun 
/*
 * SET_IVOR(n, label): program interrupt-vector register IVOR<n> with the
 * low halfword of `label` (li sign-extends, so this assumes the handler
 * labels live in low memory -- standard for these heads; TODO confirm
 * against the linker script), then `sync` so the SPR write completes
 * before the next one.  Clobbers r26.
 */
15*4882a593Smuzhiyun #define SET_IVOR(vector_number, vector_label)		\
16*4882a593Smuzhiyun 		li	r26,vector_label@l; 		\
17*4882a593Smuzhiyun 		mtspr	SPRN_IVOR##vector_number,r26;	\
18*4882a593Smuzhiyun 		sync
19*4882a593Smuzhiyun 
/*
 * ALLOC_STACK_FRAME(reg, val): reg += val.  With THREAD_SHIFT < 15 the
 * constant fits a single addi's signed 16-bit immediate; otherwise the
 * @ha/@l pair carries the high part (addis then addi).
 */
20*4882a593Smuzhiyun #if (THREAD_SHIFT < 15)
21*4882a593Smuzhiyun #define ALLOC_STACK_FRAME(reg, val)			\
22*4882a593Smuzhiyun 	addi reg,reg,val
23*4882a593Smuzhiyun #else
24*4882a593Smuzhiyun #define ALLOC_STACK_FRAME(reg, val)			\
25*4882a593Smuzhiyun 	addis	reg,reg,val@ha;				\
26*4882a593Smuzhiyun 	addi	reg,reg,val@l
27*4882a593Smuzhiyun #endif
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun /*
30*4882a593Smuzhiyun  * Macro used to get to thread save registers.
31*4882a593Smuzhiyun  * Note that entries 0-3 are used for the prolog code, and the remaining
32*4882a593Smuzhiyun  * entries are available for specific exception use in the event a handler
33*4882a593Smuzhiyun  * requires more than 4 scratch registers.
34*4882a593Smuzhiyun  */
/* Each NORMSAVE slot is one 32-bit word, hence the (offset * 4) scaling. */
35*4882a593Smuzhiyun #define THREAD_NORMSAVE(offset)	(THREAD_NORMSAVES + (offset * 4))
36*4882a593Smuzhiyun 
/*
 * BOOKE_CLEAR_BTB(reg): flush the branch target buffer on exception
 * entry, using `reg` as scratch.  The flush is emitted inside a
 * BTB_FLUSH feature section so it can be patched out at runtime, and
 * only exists on CONFIG_PPC_FSL_BOOK3E; elsewhere the macro expands to
 * nothing.  NOTE(review): presumably a branch-predictor hardening
 * (Spectre-v2 style) measure -- confirm against the BTB_FLUSH definition.
 */
37*4882a593Smuzhiyun #ifdef CONFIG_PPC_FSL_BOOK3E
38*4882a593Smuzhiyun #define BOOKE_CLEAR_BTB(reg)									\
39*4882a593Smuzhiyun START_BTB_FLUSH_SECTION								\
40*4882a593Smuzhiyun 	BTB_FLUSH(reg)									\
41*4882a593Smuzhiyun END_BTB_FLUSH_SECTION
42*4882a593Smuzhiyun #else
43*4882a593Smuzhiyun #define BOOKE_CLEAR_BTB(reg)
44*4882a593Smuzhiyun #endif
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 
/*
 * NORMAL_EXCEPTION_PROLOG(intno): common entry code for base-level
 * Book-E exceptions.  r10/r11/r13 are stashed in SPRG_WSCRATCH0 and the
 * thread's NORMSAVE slots to free up scratch registers, DO_KVM gets a
 * chance to divert the exception to a KVM guest handler, then an
 * INT_FRAME_SIZE exception frame is built: on the current stack when
 * trapping from kernel mode, or at the top of this thread's kernel
 * stack when trapping from user mode (MSR_PR set).  The BTB is flushed
 * only on the from-user path (the beq 1f skips it for kernel traps).
 * On exit: r1 == r11 == exception frame, r9 = SRR1 with MSR_WE cleared,
 * r12 = SRR0, and STACK_FRAME_REGS_MARKER sits at 8(r1).
 */
47*4882a593Smuzhiyun #define NORMAL_EXCEPTION_PROLOG(intno)						     \
48*4882a593Smuzhiyun 	mtspr	SPRN_SPRG_WSCRATCH0, r10;	/* save one register */	     \
49*4882a593Smuzhiyun 	mfspr	r10, SPRN_SPRG_THREAD;					     \
50*4882a593Smuzhiyun 	stw	r11, THREAD_NORMSAVE(0)(r10);				     \
51*4882a593Smuzhiyun 	stw	r13, THREAD_NORMSAVE(2)(r10);				     \
52*4882a593Smuzhiyun 	mfcr	r13;			/* save CR in r13 for now	   */\
53*4882a593Smuzhiyun 	mfspr	r11, SPRN_SRR1;		                                     \
54*4882a593Smuzhiyun 	DO_KVM	BOOKE_INTERRUPT_##intno SPRN_SRR1;			     \
55*4882a593Smuzhiyun 	andi.	r11, r11, MSR_PR;	/* check whether user or kernel    */\
56*4882a593Smuzhiyun 	mr	r11, r1;						     \
57*4882a593Smuzhiyun 	beq	1f;							     \
58*4882a593Smuzhiyun 	BOOKE_CLEAR_BTB(r11)						\
59*4882a593Smuzhiyun 	/* if from user, start at top of this thread's kernel stack */       \
60*4882a593Smuzhiyun 	lwz	r11, TASK_STACK - THREAD(r10);				     \
61*4882a593Smuzhiyun 	ALLOC_STACK_FRAME(r11, THREAD_SIZE);				     \
62*4882a593Smuzhiyun 1 :	subi	r11, r11, INT_FRAME_SIZE; /* Allocate exception frame */     \
63*4882a593Smuzhiyun 	stw	r13, _CCR(r11);		/* save various registers */	     \
64*4882a593Smuzhiyun 	stw	r12,GPR12(r11);						     \
65*4882a593Smuzhiyun 	stw	r9,GPR9(r11);						     \
66*4882a593Smuzhiyun 	mfspr	r13, SPRN_SPRG_RSCRATCH0;				     \
67*4882a593Smuzhiyun 	stw	r13, GPR10(r11);					     \
68*4882a593Smuzhiyun 	lwz	r12, THREAD_NORMSAVE(0)(r10);				     \
69*4882a593Smuzhiyun 	stw	r12,GPR11(r11);						     \
70*4882a593Smuzhiyun 	lwz	r13, THREAD_NORMSAVE(2)(r10); /* restore r13 */		     \
71*4882a593Smuzhiyun 	mflr	r10;							     \
72*4882a593Smuzhiyun 	stw	r10,_LINK(r11);						     \
73*4882a593Smuzhiyun 	mfspr	r12,SPRN_SRR0;						     \
74*4882a593Smuzhiyun 	stw	r1, GPR1(r11);						     \
75*4882a593Smuzhiyun 	mfspr	r9,SPRN_SRR1;						     \
76*4882a593Smuzhiyun 	stw	r1, 0(r11);						     \
77*4882a593Smuzhiyun 	mr	r1, r11;						     \
78*4882a593Smuzhiyun 	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
79*4882a593Smuzhiyun 	stw	r0,GPR0(r11);						     \
80*4882a593Smuzhiyun 	lis	r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
81*4882a593Smuzhiyun 	addi	r10, r10, STACK_FRAME_REGS_MARKER@l;			     \
82*4882a593Smuzhiyun 	stw	r10, 8(r11);						     \
83*4882a593Smuzhiyun 	SAVE_4GPRS(3, r11);						     \
84*4882a593Smuzhiyun 	SAVE_2GPRS(7, r11)
85*4882a593Smuzhiyun 
/*
 * SYSCALL_ENTRY trapno intno srr1: system-call entry path.
 *
 * With CONFIG_KVM_BOOKE_HV, a CPU_FTR_EMB_HV feature section first
 * tests MSR[GS] (loaded into CR0 via mtocrf 0x80; bf 3 = branch if the
 * guest-state bit is clear) and bounces guest syscalls to
 * kvmppc_handler_<intno>_<srr1>.  Host/non-HV path just saves CR in r12.
 *
 * Kernel-mode callers (MSR_PR clear) branch to 99:/ret_from_kernel_syscall.
 * User-mode callers get an exception frame at the top of the thread's
 * kernel stack: CR (with CR0.SO cleared), LR, SRR0 (_NIP), SRR1 (_MSR,
 * MSR_WE cleared), trapno+1 in _TRAP, GPRs 0-10, and the
 * STACK_FRAME_REGS_MARKER at 8(r1).  r2 is set to current (tovirt'd),
 * user CPU time is accounted, and if DBCR0_IDM says the task is being
 * debugged the global_dbcr0 state (per-CPU, 8 bytes each on SMP) is
 * loaded into DBCR0 after clearing DBSR.  Finally SRR0/SRR1 are pointed
 * at transfer_to_syscall with MSR_KERNEL (EE taken from the caller's
 * MSR under CONFIG_TRACE_IRQFLAGS so lockdep can be told first) and we
 * RFI to it with the MMU on.
 */
86*4882a593Smuzhiyun .macro SYSCALL_ENTRY trapno intno srr1
87*4882a593Smuzhiyun 	mfspr	r10, SPRN_SPRG_THREAD
88*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOKE_HV
89*4882a593Smuzhiyun BEGIN_FTR_SECTION
90*4882a593Smuzhiyun 	mtspr	SPRN_SPRG_WSCRATCH0, r10
91*4882a593Smuzhiyun 	stw	r11, THREAD_NORMSAVE(0)(r10)
92*4882a593Smuzhiyun 	stw	r13, THREAD_NORMSAVE(2)(r10)
93*4882a593Smuzhiyun 	mfcr	r13			/* save CR in r13 for now	   */
94*4882a593Smuzhiyun 	mfspr	r11, SPRN_SRR1
95*4882a593Smuzhiyun 	mtocrf	0x80, r11	/* check MSR[GS] without clobbering reg */
96*4882a593Smuzhiyun 	bf	3, 1975f
97*4882a593Smuzhiyun 	b	kvmppc_handler_\intno\()_\srr1
98*4882a593Smuzhiyun 1975:
99*4882a593Smuzhiyun 	mr	r12, r13
100*4882a593Smuzhiyun 	lwz	r13, THREAD_NORMSAVE(2)(r10)
101*4882a593Smuzhiyun FTR_SECTION_ELSE
102*4882a593Smuzhiyun #endif
103*4882a593Smuzhiyun 	mfcr	r12
104*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOKE_HV
105*4882a593Smuzhiyun ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
106*4882a593Smuzhiyun #endif
107*4882a593Smuzhiyun 	mfspr	r9, SPRN_SRR1
108*4882a593Smuzhiyun 	BOOKE_CLEAR_BTB(r11)
109*4882a593Smuzhiyun 	andi.	r11, r9, MSR_PR
110*4882a593Smuzhiyun 	lwz	r11, TASK_STACK - THREAD(r10)
111*4882a593Smuzhiyun 	rlwinm	r12,r12,0,4,2	/* Clear SO bit in CR */
112*4882a593Smuzhiyun 	beq-	99f
113*4882a593Smuzhiyun 	ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
114*4882a593Smuzhiyun 	stw	r12, _CCR(r11)		/* save various registers */
115*4882a593Smuzhiyun 	mflr	r12
116*4882a593Smuzhiyun 	stw	r12,_LINK(r11)
117*4882a593Smuzhiyun 	mfspr	r12,SPRN_SRR0
118*4882a593Smuzhiyun 	stw	r1, GPR1(r11)
119*4882a593Smuzhiyun 	stw	r1, 0(r11)
120*4882a593Smuzhiyun 	mr	r1, r11
121*4882a593Smuzhiyun 	stw	r12,_NIP(r11)
122*4882a593Smuzhiyun 	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?)	   */
123*4882a593Smuzhiyun 	lis	r12, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
124*4882a593Smuzhiyun 	stw	r2,GPR2(r11)
125*4882a593Smuzhiyun 	addi	r12, r12, STACK_FRAME_REGS_MARKER@l
126*4882a593Smuzhiyun 	stw	r9,_MSR(r11)
127*4882a593Smuzhiyun 	li	r2, \trapno + 1
128*4882a593Smuzhiyun 	stw	r12, 8(r11)
129*4882a593Smuzhiyun 	stw	r2,_TRAP(r11)
130*4882a593Smuzhiyun 	SAVE_GPR(0, r11)
131*4882a593Smuzhiyun 	SAVE_4GPRS(3, r11)
132*4882a593Smuzhiyun 	SAVE_2GPRS(7, r11)
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	addi	r11,r1,STACK_FRAME_OVERHEAD
135*4882a593Smuzhiyun 	addi	r2,r10,-THREAD
136*4882a593Smuzhiyun 	stw	r11,PT_REGS(r10)
137*4882a593Smuzhiyun 	/* Check to see if the dbcr0 register is set up to debug.  Use the
138*4882a593Smuzhiyun 	   internal debug mode bit to do this. */
139*4882a593Smuzhiyun 	lwz	r12,THREAD_DBCR0(r10)
140*4882a593Smuzhiyun 	andis.	r12,r12,DBCR0_IDM@h
141*4882a593Smuzhiyun 	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
142*4882a593Smuzhiyun 	beq+	3f
143*4882a593Smuzhiyun 	/* From user and task is ptraced - load up global dbcr0 */
144*4882a593Smuzhiyun 	li	r12,-1			/* clear all pending debug events */
145*4882a593Smuzhiyun 	mtspr	SPRN_DBSR,r12
146*4882a593Smuzhiyun 	lis	r11,global_dbcr0@ha
147*4882a593Smuzhiyun 	tophys(r11,r11)
148*4882a593Smuzhiyun 	addi	r11,r11,global_dbcr0@l
149*4882a593Smuzhiyun #ifdef CONFIG_SMP
150*4882a593Smuzhiyun 	lwz	r10, TASK_CPU(r2)
151*4882a593Smuzhiyun 	slwi	r10, r10, 3
152*4882a593Smuzhiyun 	add	r11, r11, r10
153*4882a593Smuzhiyun #endif
154*4882a593Smuzhiyun 	lwz	r12,0(r11)
155*4882a593Smuzhiyun 	mtspr	SPRN_DBCR0,r12
156*4882a593Smuzhiyun 	lwz	r12,4(r11)
157*4882a593Smuzhiyun 	addi	r12,r12,-1
158*4882a593Smuzhiyun 	stw	r12,4(r11)
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 3:
161*4882a593Smuzhiyun 	tovirt(r2, r2)			/* set r2 to current */
162*4882a593Smuzhiyun 	lis	r11, transfer_to_syscall@h
163*4882a593Smuzhiyun 	ori	r11, r11, transfer_to_syscall@l
164*4882a593Smuzhiyun #ifdef CONFIG_TRACE_IRQFLAGS
165*4882a593Smuzhiyun 	/*
166*4882a593Smuzhiyun 	 * If MSR is changing we need to keep interrupts disabled at this point
167*4882a593Smuzhiyun 	 * otherwise we might risk taking an interrupt before we tell lockdep
168*4882a593Smuzhiyun 	 * they are enabled.
169*4882a593Smuzhiyun 	 */
170*4882a593Smuzhiyun 	lis	r10, MSR_KERNEL@h
171*4882a593Smuzhiyun 	ori	r10, r10, MSR_KERNEL@l
172*4882a593Smuzhiyun 	rlwimi	r10, r9, 0, MSR_EE
173*4882a593Smuzhiyun #else
174*4882a593Smuzhiyun 	lis	r10, (MSR_KERNEL | MSR_EE)@h
175*4882a593Smuzhiyun 	ori	r10, r10, (MSR_KERNEL | MSR_EE)@l
176*4882a593Smuzhiyun #endif
177*4882a593Smuzhiyun 	mtspr	SPRN_SRR1,r10
178*4882a593Smuzhiyun 	mtspr	SPRN_SRR0,r11
179*4882a593Smuzhiyun 	RFI				/* jump to handler, enable MMU */
180*4882a593Smuzhiyun 99:	b	ret_from_kernel_syscall
181*4882a593Smuzhiyun .endm
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun /* To handle the additional exception priority levels on 40x and Book-E
184*4882a593Smuzhiyun  * processors we allocate a stack per additional priority level.
185*4882a593Smuzhiyun  *
186*4882a593Smuzhiyun  * On 40x critical is the only additional level
187*4882a593Smuzhiyun  * On 44x/e500 we have critical and machine check
188*4882a593Smuzhiyun  * On e200 we have critical and debug (machine check occurs via critical)
189*4882a593Smuzhiyun  *
190*4882a593Smuzhiyun  * Additionally we reserve a SPRG for each priority level so we can free up a
191*4882a593Smuzhiyun  * GPR to use as the base for indirect access to the exception stacks.  This
192*4882a593Smuzhiyun  * is necessary since the MMU is always on, for Book-E parts, and the stacks
193*4882a593Smuzhiyun  * are offset from KERNELBASE.
194*4882a593Smuzhiyun  *
195*4882a593Smuzhiyun  * There is some space optimization to be had here if desired.  However
196*4882a593Smuzhiyun  * to allow for a common kernel with support for debug exceptions either
197*4882a593Smuzhiyun  * going to critical or their own debug level we aren't currently
198*4882a593Smuzhiyun  * providing configurations that micro-optimize space usage.
199*4882a593Smuzhiyun  */
200*4882a593Smuzhiyun 
/* Per-level exception stack bases (arrays of per-CPU stack pointers). */
201*4882a593Smuzhiyun #define MC_STACK_BASE		mcheckirq_ctx
202*4882a593Smuzhiyun #define CRIT_STACK_BASE		critirq_ctx
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun /* only on e500mc/e200 */
205*4882a593Smuzhiyun #define DBG_STACK_BASE		dbgirq_ctx
206*4882a593Smuzhiyun 
/* Offset from a stack base to the exception frame: leave room for one
 * INT_FRAME plus the EXC_LVL_SIZE save area at the top of the stack. */
207*4882a593Smuzhiyun #define EXC_LVL_FRAME_OVERHEAD	(THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
208*4882a593Smuzhiyun 
/*
 * BOOKE_LOAD_EXC_LEVEL_STACK(level): load r8 with this CPU's exception
 * stack for the given level (CRIT/DBG/MC), offset by
 * EXC_LVL_FRAME_OVERHEAD.  On SMP the per-CPU pointer is indexed by
 * PIR << 2 (word-sized table entries); UP just loads the first entry.
 * Clobbers r8 only; callers must have stashed r8 in a scratch SPRG first.
 */
209*4882a593Smuzhiyun #ifdef CONFIG_SMP
210*4882a593Smuzhiyun #define BOOKE_LOAD_EXC_LEVEL_STACK(level)		\
211*4882a593Smuzhiyun 	mfspr	r8,SPRN_PIR;				\
212*4882a593Smuzhiyun 	slwi	r8,r8,2;				\
213*4882a593Smuzhiyun 	addis	r8,r8,level##_STACK_BASE@ha;		\
214*4882a593Smuzhiyun 	lwz	r8,level##_STACK_BASE@l(r8);		\
215*4882a593Smuzhiyun 	addi	r8,r8,EXC_LVL_FRAME_OVERHEAD;
216*4882a593Smuzhiyun #else
217*4882a593Smuzhiyun #define BOOKE_LOAD_EXC_LEVEL_STACK(level)		\
218*4882a593Smuzhiyun 	lis	r8,level##_STACK_BASE@ha;		\
219*4882a593Smuzhiyun 	lwz	r8,level##_STACK_BASE@l(r8);		\
220*4882a593Smuzhiyun 	addi	r8,r8,EXC_LVL_FRAME_OVERHEAD;
221*4882a593Smuzhiyun #endif
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun /*
224*4882a593Smuzhiyun  * Exception prolog for critical/machine check exceptions.  This is a
225*4882a593Smuzhiyun  * little different from the normal exception prolog above since a
226*4882a593Smuzhiyun  * critical/machine check exception can potentially occur at any point
227*4882a593Smuzhiyun  * during normal exception processing. Thus we cannot use the same SPRG
228*4882a593Smuzhiyun  * registers as the normal prolog above. Instead we use a portion of the
229*4882a593Smuzhiyun  * critical/machine check exception stack at low physical addresses.
230*4882a593Smuzhiyun  */
/*
 * Parameters:
 *   exc_level        - CRIT/DBG/MC: selects the scratch SPRG and stack base
 *   intno            - BOOKE_INTERRUPT_* suffix handed to DO_KVM
 *   exc_level_srr0/1 - the save/restore SPR pair for this level
 * Early registers are parked on the level's dedicated stack (via r8);
 * if the trap came from user mode they are copied onto an
 * EXC_LVL_FRAME_OVERHEAD frame on the thread's kernel stack instead.
 * DEAR and ESR are captured into the frame here because a nested
 * base-level handler may clobber them.  On exit: r1 == r11 = frame,
 * r9 = exc_level_srr1 with MSR_WE cleared, r12 = exc_level_srr0.
 */
231*4882a593Smuzhiyun #define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \
232*4882a593Smuzhiyun 	mtspr	SPRN_SPRG_WSCRATCH_##exc_level,r8;			     \
233*4882a593Smuzhiyun 	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
234*4882a593Smuzhiyun 	stw	r9,GPR9(r8);		/* save various registers	   */\
235*4882a593Smuzhiyun 	mfcr	r9;			/* save CR in r9 for now	   */\
236*4882a593Smuzhiyun 	stw	r10,GPR10(r8);						     \
237*4882a593Smuzhiyun 	stw	r11,GPR11(r8);						     \
238*4882a593Smuzhiyun 	stw	r9,_CCR(r8);		/* save CR on stack		   */\
239*4882a593Smuzhiyun 	mfspr	r11,exc_level_srr1;	/* check whether user or kernel    */\
240*4882a593Smuzhiyun 	DO_KVM	BOOKE_INTERRUPT_##intno exc_level_srr1;		             \
241*4882a593Smuzhiyun 	BOOKE_CLEAR_BTB(r10)						\
242*4882a593Smuzhiyun 	andi.	r11,r11,MSR_PR;						     \
243*4882a593Smuzhiyun 	mfspr	r11,SPRN_SPRG_THREAD;	/* if from user, start at top of   */\
244*4882a593Smuzhiyun 	lwz	r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
245*4882a593Smuzhiyun 	addi	r11,r11,EXC_LVL_FRAME_OVERHEAD;	/* allocate stack frame    */\
246*4882a593Smuzhiyun 	beq	1f;							     \
247*4882a593Smuzhiyun 	/* COMING FROM USER MODE */					     \
248*4882a593Smuzhiyun 	stw	r9,_CCR(r11);		/* save CR			   */\
249*4882a593Smuzhiyun 	lwz	r10,GPR10(r8);		/* copy regs from exception stack  */\
250*4882a593Smuzhiyun 	lwz	r9,GPR9(r8);						     \
251*4882a593Smuzhiyun 	stw	r10,GPR10(r11);						     \
252*4882a593Smuzhiyun 	lwz	r10,GPR11(r8);						     \
253*4882a593Smuzhiyun 	stw	r9,GPR9(r11);						     \
254*4882a593Smuzhiyun 	stw	r10,GPR11(r11);						     \
255*4882a593Smuzhiyun 	b	2f;							     \
256*4882a593Smuzhiyun 	/* COMING FROM PRIV MODE */					     \
257*4882a593Smuzhiyun 1:	mr	r11, r8;							     \
258*4882a593Smuzhiyun 2:	mfspr	r8,SPRN_SPRG_RSCRATCH_##exc_level;			     \
259*4882a593Smuzhiyun 	stw	r12,GPR12(r11);		/* save various registers	   */\
260*4882a593Smuzhiyun 	mflr	r10;							     \
261*4882a593Smuzhiyun 	stw	r10,_LINK(r11);						     \
262*4882a593Smuzhiyun 	mfspr	r12,SPRN_DEAR;		/* save DEAR and ESR in the frame  */\
263*4882a593Smuzhiyun 	stw	r12,_DEAR(r11);		/* since they may have had stuff   */\
264*4882a593Smuzhiyun 	mfspr	r9,SPRN_ESR;		/* in them at the point where the  */\
265*4882a593Smuzhiyun 	stw	r9,_ESR(r11);		/* exception was taken		   */\
266*4882a593Smuzhiyun 	mfspr	r12,exc_level_srr0;					     \
267*4882a593Smuzhiyun 	stw	r1,GPR1(r11);						     \
268*4882a593Smuzhiyun 	mfspr	r9,exc_level_srr1;					     \
269*4882a593Smuzhiyun 	stw	r1,0(r11);						     \
270*4882a593Smuzhiyun 	mr	r1,r11;							     \
271*4882a593Smuzhiyun 	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
272*4882a593Smuzhiyun 	stw	r0,GPR0(r11);						     \
273*4882a593Smuzhiyun 	SAVE_4GPRS(3, r11);						     \
274*4882a593Smuzhiyun 	SAVE_2GPRS(7, r11)
275*4882a593Smuzhiyun 
/* Level-specific instantiations of EXC_LEVEL_EXCEPTION_PROLOG, bound to
 * the CSRR (critical), DSRR (debug) and MCSRR (machine check) SPR pairs. */
276*4882a593Smuzhiyun #define CRITICAL_EXCEPTION_PROLOG(intno) \
277*4882a593Smuzhiyun 		EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1)
278*4882a593Smuzhiyun #define DEBUG_EXCEPTION_PROLOG \
279*4882a593Smuzhiyun 		EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
280*4882a593Smuzhiyun #define MCHECK_EXCEPTION_PROLOG \
281*4882a593Smuzhiyun 		EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \
282*4882a593Smuzhiyun 			SPRN_MCSRR0, SPRN_MCSRR1)
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun /*
285*4882a593Smuzhiyun  * Guest Doorbell -- this is a bit odd in that uses GSRR0/1 despite
286*4882a593Smuzhiyun  * being delivered to the host.  This exception can only happen
287*4882a593Smuzhiyun  * inside a KVM guest -- so we just handle up to the DO_KVM rather
288*4882a593Smuzhiyun  * than try to fit this into one of the existing prolog macros.
289*4882a593Smuzhiyun  */
/* Saves the same early registers as NORMAL_EXCEPTION_PROLOG, then hands
 * off to DO_KVM; the trailing `trap` is a backstop that should never be
 * reached, since this exception only fires inside a KVM guest.
 * NOTE(review): r11 is loaded from SPRN_SRR1 while DO_KVM is passed
 * SPRN_GSRR1 -- presumably DO_KVM uses the SPR name only to select the
 * handler flavour; verify against kvm_booke_hv_asm.h. */
290*4882a593Smuzhiyun #define GUEST_DOORBELL_EXCEPTION \
291*4882a593Smuzhiyun 	START_EXCEPTION(GuestDoorbell);					     \
292*4882a593Smuzhiyun 	mtspr	SPRN_SPRG_WSCRATCH0, r10;	/* save one register */	     \
293*4882a593Smuzhiyun 	mfspr	r10, SPRN_SPRG_THREAD;					     \
294*4882a593Smuzhiyun 	stw	r11, THREAD_NORMSAVE(0)(r10);				     \
295*4882a593Smuzhiyun 	mfspr	r11, SPRN_SRR1;		                                     \
296*4882a593Smuzhiyun 	stw	r13, THREAD_NORMSAVE(2)(r10);				     \
297*4882a593Smuzhiyun 	mfcr	r13;			/* save CR in r13 for now	   */\
298*4882a593Smuzhiyun 	DO_KVM	BOOKE_INTERRUPT_GUEST_DBELL SPRN_GSRR1;			     \
299*4882a593Smuzhiyun 	trap
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun /*
302*4882a593Smuzhiyun  * Exception vectors.
303*4882a593Smuzhiyun  */
/* Emit an exception entry label aligned to 2^5 = 32 bytes. */
304*4882a593Smuzhiyun #define	START_EXCEPTION(label)						     \
305*4882a593Smuzhiyun         .align 5;              						     \
306*4882a593Smuzhiyun label:
307*4882a593Smuzhiyun 
/* Generic base-level exception: label + normal prolog, r3 = pt_regs,
 * then transfer via the supplied EXC_XFER_* macro to `hdlr`. */
308*4882a593Smuzhiyun #define EXCEPTION(n, intno, label, hdlr, xfer)			\
309*4882a593Smuzhiyun 	START_EXCEPTION(label);					\
310*4882a593Smuzhiyun 	NORMAL_EXCEPTION_PROLOG(intno);				\
311*4882a593Smuzhiyun 	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
312*4882a593Smuzhiyun 	xfer(n, hdlr)
313*4882a593Smuzhiyun 
/* Critical-level exception: trap number n+2 distinguishes it from the
 * base level; ME/DE/CE stay masked in the handler's MSR so a nested
 * machine-check/debug/critical cannot recurse through this frame. */
314*4882a593Smuzhiyun #define CRITICAL_EXCEPTION(n, intno, label, hdlr)			\
315*4882a593Smuzhiyun 	START_EXCEPTION(label);						\
316*4882a593Smuzhiyun 	CRITICAL_EXCEPTION_PROLOG(intno);				\
317*4882a593Smuzhiyun 	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
318*4882a593Smuzhiyun 	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
319*4882a593Smuzhiyun 			  crit_transfer_to_handler, ret_from_crit_exc)
320*4882a593Smuzhiyun 
/* Machine-check exception: trap number n+4; ESR is re-read and stored
 * into the frame before transfer; ME/DE/CE masked as for critical. */
321*4882a593Smuzhiyun #define MCHECK_EXCEPTION(n, label, hdlr)			\
322*4882a593Smuzhiyun 	START_EXCEPTION(label);					\
323*4882a593Smuzhiyun 	MCHECK_EXCEPTION_PROLOG;				\
324*4882a593Smuzhiyun 	mfspr	r5,SPRN_ESR;					\
325*4882a593Smuzhiyun 	stw	r5,_ESR(r11);					\
326*4882a593Smuzhiyun 	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
327*4882a593Smuzhiyun 	EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
328*4882a593Smuzhiyun 			  mcheck_transfer_to_handler, ret_from_mcheck_exc)
329*4882a593Smuzhiyun 
/* Store `trap` into the frame, load the handler MSR into r10, and
 * branch-and-link to the transfer routine; the handler and return
 * addresses are emitted as literals immediately after the bl, where
 * `tfer` evidently picks them up via LR. */
330*4882a593Smuzhiyun #define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret)	\
331*4882a593Smuzhiyun 	li	r10,trap;					\
332*4882a593Smuzhiyun 	stw	r10,_TRAP(r11);					\
333*4882a593Smuzhiyun 	lis	r10,msr@h;					\
334*4882a593Smuzhiyun 	ori	r10,r10,msr@l;					\
335*4882a593Smuzhiyun 	bl	tfer;		 				\
336*4882a593Smuzhiyun 	.long	hdlr;						\
337*4882a593Smuzhiyun 	.long	ret
338*4882a593Smuzhiyun 
/* STD: full register save path, trap number n.
 * LITE: lighter transfer path, marked by trap number n+1. */
339*4882a593Smuzhiyun #define EXC_XFER_STD(n, hdlr)		\
340*4882a593Smuzhiyun 	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
341*4882a593Smuzhiyun 			  ret_from_except_full)
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun #define EXC_XFER_LITE(n, hdlr)		\
344*4882a593Smuzhiyun 	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
345*4882a593Smuzhiyun 			  ret_from_except)
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun /* Check for a single step debug exception while in an exception
348*4882a593Smuzhiyun  * handler before state has been saved.  This is to catch the case
349*4882a593Smuzhiyun  * where an instruction that we are trying to single step causes
350*4882a593Smuzhiyun  * an exception (eg ITLB/DTLB miss) and thus the first instruction of
351*4882a593Smuzhiyun  * the exception handler generates a single step debug exception.
352*4882a593Smuzhiyun  *
353*4882a593Smuzhiyun  * If we get a debug trap on the first instruction of an exception handler,
354*4882a593Smuzhiyun  * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
355*4882a593Smuzhiyun  * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
356*4882a593Smuzhiyun  * The exception handler was handling a non-critical interrupt, so it will
357*4882a593Smuzhiyun  * save (and later restore) the MSR via SPRN_CSRR1, which will still have
358*4882a593Smuzhiyun  * the MSR_DE bit set.
359*4882a593Smuzhiyun  */
/* Debug exception delivered at its own (DSRR) level.  If the IC/BT
 * event hit inside the exception vectors (interrupt_base..interrupt_end)
 * it is spurious: clear DE in the saved DSRR1, ack DBSR, restore the
 * parked registers from the DBG stack and rfdi straight back.
 * Otherwise fall through to DebugException with trap number 0x2008. */
360*4882a593Smuzhiyun #define DEBUG_DEBUG_EXCEPTION						      \
361*4882a593Smuzhiyun 	START_EXCEPTION(DebugDebug);					      \
362*4882a593Smuzhiyun 	DEBUG_EXCEPTION_PROLOG;						      \
363*4882a593Smuzhiyun 									      \
364*4882a593Smuzhiyun 	/*								      \
365*4882a593Smuzhiyun 	 * If there is a single step or branch-taken exception in an	      \
366*4882a593Smuzhiyun 	 * exception entry sequence, it was probably meant to apply to	      \
367*4882a593Smuzhiyun 	 * the code where the exception occurred (since exception entry	      \
368*4882a593Smuzhiyun 	 * doesn't turn off DE automatically).  We simulate the effect	      \
369*4882a593Smuzhiyun 	 * of turning off DE on entry to an exception handler by turning      \
370*4882a593Smuzhiyun 	 * off DE in the DSRR1 value and clearing the debug status.	      \
371*4882a593Smuzhiyun 	 */								      \
372*4882a593Smuzhiyun 	mfspr	r10,SPRN_DBSR;		/* check single-step/branch taken */  \
373*4882a593Smuzhiyun 	andis.	r10,r10,(DBSR_IC|DBSR_BT)@h;				      \
374*4882a593Smuzhiyun 	beq+	2f;							      \
375*4882a593Smuzhiyun 									      \
376*4882a593Smuzhiyun 	lis	r10,interrupt_base@h;	/* check if exception in vectors */   \
377*4882a593Smuzhiyun 	ori	r10,r10,interrupt_base@l;				      \
378*4882a593Smuzhiyun 	cmplw	r12,r10;						      \
379*4882a593Smuzhiyun 	blt+	2f;			/* addr below exception vectors */    \
380*4882a593Smuzhiyun 									      \
381*4882a593Smuzhiyun 	lis	r10,interrupt_end@h;					      \
382*4882a593Smuzhiyun 	ori	r10,r10,interrupt_end@l;				      \
383*4882a593Smuzhiyun 	cmplw	r12,r10;						      \
384*4882a593Smuzhiyun 	bgt+	2f;			/* addr above exception vectors */    \
385*4882a593Smuzhiyun 									      \
386*4882a593Smuzhiyun 	/* here it looks like we got an inappropriate debug exception. */     \
387*4882a593Smuzhiyun 1:	rlwinm	r9,r9,0,~MSR_DE;	/* clear DE in the DSRR1 value */     \
388*4882a593Smuzhiyun 	lis	r10,(DBSR_IC|DBSR_BT)@h;	/* clear the IC event */      \
389*4882a593Smuzhiyun 	mtspr	SPRN_DBSR,r10;						      \
390*4882a593Smuzhiyun 	/* restore state and get out */					      \
391*4882a593Smuzhiyun 	lwz	r10,_CCR(r11);						      \
392*4882a593Smuzhiyun 	lwz	r0,GPR0(r11);						      \
393*4882a593Smuzhiyun 	lwz	r1,GPR1(r11);						      \
394*4882a593Smuzhiyun 	mtcrf	0x80,r10;						      \
395*4882a593Smuzhiyun 	mtspr	SPRN_DSRR0,r12;						      \
396*4882a593Smuzhiyun 	mtspr	SPRN_DSRR1,r9;						      \
397*4882a593Smuzhiyun 	lwz	r9,GPR9(r11);						      \
398*4882a593Smuzhiyun 	lwz	r12,GPR12(r11);						      \
399*4882a593Smuzhiyun 	mtspr	SPRN_SPRG_WSCRATCH_DBG,r8;				      \
400*4882a593Smuzhiyun 	BOOKE_LOAD_EXC_LEVEL_STACK(DBG); /* r8 points to the debug stack */ \
401*4882a593Smuzhiyun 	lwz	r10,GPR10(r8);						      \
402*4882a593Smuzhiyun 	lwz	r11,GPR11(r8);						      \
403*4882a593Smuzhiyun 	mfspr	r8,SPRN_SPRG_RSCRATCH_DBG;				      \
404*4882a593Smuzhiyun 									      \
405*4882a593Smuzhiyun 	PPC_RFDI;							      \
406*4882a593Smuzhiyun 	b	.;							      \
407*4882a593Smuzhiyun 									      \
408*4882a593Smuzhiyun 	/* continue normal handling for a debug exception... */		      \
409*4882a593Smuzhiyun 2:	mfspr	r4,SPRN_DBSR;						      \
410*4882a593Smuzhiyun 	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
411*4882a593Smuzhiyun 	EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), debug_transfer_to_handler, ret_from_debug_exc)
412*4882a593Smuzhiyun 
/* Same logic as DEBUG_DEBUG_EXCEPTION but for parts where the debug
 * exception is delivered at the critical (CSRR) level: spurious IC/BT
 * events inside the vectors are dropped via rfci; real ones go to
 * DebugException with trap number 0x2002. */
413*4882a593Smuzhiyun #define DEBUG_CRIT_EXCEPTION						      \
414*4882a593Smuzhiyun 	START_EXCEPTION(DebugCrit);					      \
415*4882a593Smuzhiyun 	CRITICAL_EXCEPTION_PROLOG(DEBUG);				      \
416*4882a593Smuzhiyun 									      \
417*4882a593Smuzhiyun 	/*								      \
418*4882a593Smuzhiyun 	 * If there is a single step or branch-taken exception in an	      \
419*4882a593Smuzhiyun 	 * exception entry sequence, it was probably meant to apply to	      \
420*4882a593Smuzhiyun 	 * the code where the exception occurred (since exception entry	      \
421*4882a593Smuzhiyun 	 * doesn't turn off DE automatically).  We simulate the effect	      \
422*4882a593Smuzhiyun 	 * of turning off DE on entry to an exception handler by turning      \
423*4882a593Smuzhiyun 	 * off DE in the CSRR1 value and clearing the debug status.	      \
424*4882a593Smuzhiyun 	 */								      \
425*4882a593Smuzhiyun 	mfspr	r10,SPRN_DBSR;		/* check single-step/branch taken */  \
426*4882a593Smuzhiyun 	andis.	r10,r10,(DBSR_IC|DBSR_BT)@h;				      \
427*4882a593Smuzhiyun 	beq+	2f;							      \
428*4882a593Smuzhiyun 									      \
429*4882a593Smuzhiyun 	lis	r10,interrupt_base@h;	/* check if exception in vectors */   \
430*4882a593Smuzhiyun 	ori	r10,r10,interrupt_base@l;				      \
431*4882a593Smuzhiyun 	cmplw	r12,r10;						      \
432*4882a593Smuzhiyun 	blt+	2f;			/* addr below exception vectors */    \
433*4882a593Smuzhiyun 									      \
434*4882a593Smuzhiyun 	lis	r10,interrupt_end@h;					      \
435*4882a593Smuzhiyun 	ori	r10,r10,interrupt_end@l;				      \
436*4882a593Smuzhiyun 	cmplw	r12,r10;						      \
437*4882a593Smuzhiyun 	bgt+	2f;			/* addr above exception vectors */    \
438*4882a593Smuzhiyun 									      \
439*4882a593Smuzhiyun 	/* here it looks like we got an inappropriate debug exception. */     \
440*4882a593Smuzhiyun 1:	rlwinm	r9,r9,0,~MSR_DE;	/* clear DE in the CSRR1 value */     \
441*4882a593Smuzhiyun 	lis	r10,(DBSR_IC|DBSR_BT)@h;	/* clear the IC event */      \
442*4882a593Smuzhiyun 	mtspr	SPRN_DBSR,r10;						      \
443*4882a593Smuzhiyun 	/* restore state and get out */					      \
444*4882a593Smuzhiyun 	lwz	r10,_CCR(r11);						      \
445*4882a593Smuzhiyun 	lwz	r0,GPR0(r11);						      \
446*4882a593Smuzhiyun 	lwz	r1,GPR1(r11);						      \
447*4882a593Smuzhiyun 	mtcrf	0x80,r10;						      \
448*4882a593Smuzhiyun 	mtspr	SPRN_CSRR0,r12;						      \
449*4882a593Smuzhiyun 	mtspr	SPRN_CSRR1,r9;						      \
450*4882a593Smuzhiyun 	lwz	r9,GPR9(r11);						      \
451*4882a593Smuzhiyun 	lwz	r12,GPR12(r11);						      \
452*4882a593Smuzhiyun 	mtspr	SPRN_SPRG_WSCRATCH_CRIT,r8;				      \
453*4882a593Smuzhiyun 	BOOKE_LOAD_EXC_LEVEL_STACK(CRIT); /* r8 points to the critical stack */ \
454*4882a593Smuzhiyun 	lwz	r10,GPR10(r8);						      \
455*4882a593Smuzhiyun 	lwz	r11,GPR11(r8);						      \
456*4882a593Smuzhiyun 	mfspr	r8,SPRN_SPRG_RSCRATCH_CRIT;				      \
457*4882a593Smuzhiyun 									      \
458*4882a593Smuzhiyun 	rfci;								      \
459*4882a593Smuzhiyun 	b	.;							      \
460*4882a593Smuzhiyun 									      \
461*4882a593Smuzhiyun 	/* continue normal handling for a critical exception... */	      \
462*4882a593Smuzhiyun 2:	mfspr	r4,SPRN_DBSR;						      \
463*4882a593Smuzhiyun 	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
464*4882a593Smuzhiyun 	EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc)
465*4882a593Smuzhiyun 
/* DSI: save ESR and the faulting address (DEAR) into the frame, then
 * hand off to handle_page_fault via the lite transfer path. */
466*4882a593Smuzhiyun #define DATA_STORAGE_EXCEPTION						      \
467*4882a593Smuzhiyun 	START_EXCEPTION(DataStorage)					      \
468*4882a593Smuzhiyun 	NORMAL_EXCEPTION_PROLOG(DATA_STORAGE);		      \
469*4882a593Smuzhiyun 	mfspr	r5,SPRN_ESR;		/* Grab the ESR and save it */	      \
470*4882a593Smuzhiyun 	stw	r5,_ESR(r11);						      \
471*4882a593Smuzhiyun 	mfspr	r4,SPRN_DEAR;		/* Grab the DEAR */		      \
472*4882a593Smuzhiyun 	stw	r4, _DEAR(r11);						      \
473*4882a593Smuzhiyun 	EXC_XFER_LITE(0x0300, handle_page_fault)
474*4882a593Smuzhiyun 
/* ISI: the faulting address is SRR0 (held in r12 by the prolog), passed
 * as arg2 and recorded in _DEAR; arg3 is zero. */
475*4882a593Smuzhiyun #define INSTRUCTION_STORAGE_EXCEPTION					      \
476*4882a593Smuzhiyun 	START_EXCEPTION(InstructionStorage)				      \
477*4882a593Smuzhiyun 	NORMAL_EXCEPTION_PROLOG(INST_STORAGE);		      \
478*4882a593Smuzhiyun 	mfspr	r5,SPRN_ESR;		/* Grab the ESR and save it */	      \
479*4882a593Smuzhiyun 	stw	r5,_ESR(r11);						      \
480*4882a593Smuzhiyun 	mr      r4,r12;                 /* Pass SRR0 as arg2 */		      \
481*4882a593Smuzhiyun 	stw	r4, _DEAR(r11);						      \
482*4882a593Smuzhiyun 	li      r5,0;                   /* Pass zero as arg3 */		      \
483*4882a593Smuzhiyun 	EXC_XFER_LITE(0x0400, handle_page_fault)
484*4882a593Smuzhiyun 
/* Alignment: record the misaligned address (DEAR) and transfer to
 * alignment_exception with r3 = pt_regs. */
485*4882a593Smuzhiyun #define ALIGNMENT_EXCEPTION						      \
486*4882a593Smuzhiyun 	START_EXCEPTION(Alignment)					      \
487*4882a593Smuzhiyun 	NORMAL_EXCEPTION_PROLOG(ALIGNMENT);		      \
488*4882a593Smuzhiyun 	mfspr   r4,SPRN_DEAR;           /* Grab the DEAR and save it */	      \
489*4882a593Smuzhiyun 	stw     r4,_DEAR(r11);						      \
490*4882a593Smuzhiyun 	addi    r3,r1,STACK_FRAME_OVERHEAD;				      \
491*4882a593Smuzhiyun 	EXC_XFER_STD(0x0600, alignment_exception)
492*4882a593Smuzhiyun 
/* Program check: save ESR (reason bits) and transfer to
 * program_check_exception with r3 = pt_regs. */
493*4882a593Smuzhiyun #define PROGRAM_EXCEPTION						      \
494*4882a593Smuzhiyun 	START_EXCEPTION(Program)					      \
495*4882a593Smuzhiyun 	NORMAL_EXCEPTION_PROLOG(PROGRAM);		      \
496*4882a593Smuzhiyun 	mfspr	r4,SPRN_ESR;		/* Grab the ESR and save it */	      \
497*4882a593Smuzhiyun 	stw	r4,_ESR(r11);						      \
498*4882a593Smuzhiyun 	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
499*4882a593Smuzhiyun 	EXC_XFER_STD(0x0700, program_check_exception)
500*4882a593Smuzhiyun 
/* Decrementer: acknowledge the DEC event by writing TSR_DIS to TSR,
 * then transfer to timer_interrupt. */
501*4882a593Smuzhiyun #define DECREMENTER_EXCEPTION						      \
502*4882a593Smuzhiyun 	START_EXCEPTION(Decrementer)					      \
503*4882a593Smuzhiyun 	NORMAL_EXCEPTION_PROLOG(DECREMENTER);		      \
504*4882a593Smuzhiyun 	lis     r0,TSR_DIS@h;           /* Setup the DEC interrupt mask */    \
505*4882a593Smuzhiyun 	mtspr   SPRN_TSR,r0;		/* Clear the DEC interrupt */	      \
506*4882a593Smuzhiyun 	addi    r3,r1,STACK_FRAME_OVERHEAD;				      \
507*4882a593Smuzhiyun 	EXC_XFER_LITE(0x0900, timer_interrupt)
508*4882a593Smuzhiyun 
/* FP unavailable: from user (beq falls through on kernel, i.e. CR0.EQ
 * set by the prolog's MSR_PR test) just load the FPU state and return
 * fast; from kernel it's a bug, reported via
 * kernel_fp_unavailable_exception. */
509*4882a593Smuzhiyun #define FP_UNAVAILABLE_EXCEPTION					      \
510*4882a593Smuzhiyun 	START_EXCEPTION(FloatingPointUnavailable)			      \
511*4882a593Smuzhiyun 	NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL);		      \
512*4882a593Smuzhiyun 	beq	1f;							      \
513*4882a593Smuzhiyun 	bl	load_up_fpu;		/* if from user, just load it up */   \
514*4882a593Smuzhiyun 	b	fast_exception_return;					      \
515*4882a593Smuzhiyun 1:	addi	r3,r1,STACK_FRAME_OVERHEAD;				      \
516*4882a593Smuzhiyun 	EXC_XFER_STD(0x800, kernel_fp_unavailable_exception)
517*4882a593Smuzhiyun 
518*4882a593Smuzhiyun #else /* __ASSEMBLY__ */
/* Per-thread save area for SPRs that nested exception levels would
 * otherwise clobber.  Field order presumably matches asm-offsets
 * definitions used by the assembly above -- do not reorder. */
519*4882a593Smuzhiyun struct exception_regs {
520*4882a593Smuzhiyun 	unsigned long mas0;		/* MMU assist registers */
521*4882a593Smuzhiyun 	unsigned long mas1;
522*4882a593Smuzhiyun 	unsigned long mas2;
523*4882a593Smuzhiyun 	unsigned long mas3;
524*4882a593Smuzhiyun 	unsigned long mas6;
525*4882a593Smuzhiyun 	unsigned long mas7;
526*4882a593Smuzhiyun 	unsigned long srr0;		/* base-level save/restore pair */
527*4882a593Smuzhiyun 	unsigned long srr1;
528*4882a593Smuzhiyun 	unsigned long csrr0;		/* critical-level pair */
529*4882a593Smuzhiyun 	unsigned long csrr1;
530*4882a593Smuzhiyun 	unsigned long dsrr0;		/* debug-level pair */
531*4882a593Smuzhiyun 	unsigned long dsrr1;
532*4882a593Smuzhiyun 	unsigned long saved_ksp_limit;
533*4882a593Smuzhiyun };
534*4882a593Smuzhiyun 
535*4882a593Smuzhiyun /* ensure this structure is always sized to a multiple of the stack alignment */
/* 16 here is the stack alignment assumed by the asm above -- keep in sync. */
536*4882a593Smuzhiyun #define STACK_EXC_LVL_FRAME_SIZE	ALIGN(sizeof (struct exception_regs), 16)
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun #endif /* __ASSEMBLY__ */
539*4882a593Smuzhiyun #endif /* __HEAD_BOOKE_H__ */
540