xref: /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/exception-64e.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  Definitions for use by exception code on Book3-E
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *  Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun #ifndef _ASM_POWERPC_EXCEPTION_64E_H
8*4882a593Smuzhiyun #define _ASM_POWERPC_EXCEPTION_64E_H
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun /*
11*4882a593Smuzhiyun  * SPRG usage and other considerations...
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * Since TLB miss and other standard exceptions can be interrupted by
14*4882a593Smuzhiyun  * critical exceptions which can themselves be interrupted by machine
15*4882a593Smuzhiyun  * checks, and since the latter two can themselves cause a TLB miss when
16*4882a593Smuzhiyun  * hitting the linear mapping for the kernel stacks, we need to be a bit
17*4882a593Smuzhiyun  * creative on how we use SPRGs.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  * The base idea is that we have one SPRG reserved for critical and one
20*4882a593Smuzhiyun  * for machine check interrupts. Those are used to save a GPR that can
21*4882a593Smuzhiyun  * then be used to get the PACA, and store as much context as we need
22*4882a593Smuzhiyun  * to save in there. That includes saving the SPRGs used by the TLB miss
23*4882a593Smuzhiyun  * handler for linear mapping misses and the associated SRR0/1 due to
24*4882a593Smuzhiyun  * the above re-entrancy issue.
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  * So here's the current usage pattern. It's done regardless of which
27*4882a593Smuzhiyun  * SPRGs are user-readable though, thus we might have to change some of
28*4882a593Smuzhiyun  * this later. In order to do that more easily, we use special constants
29*4882a593Smuzhiyun  * for naming them
30*4882a593Smuzhiyun  *
31*4882a593Smuzhiyun  * WARNING: Some of these SPRGs are user readable. We need to do something
31*4882a593Smuzhiyun  * WARNING: Some of these SPRGs are user readable. We need to do something
32*4882a593Smuzhiyun  * about it at some point by making sure they can't be used to leak kernel
33*4882a593Smuzhiyun  * critical data
34*4882a593Smuzhiyun  */
35*4882a593Smuzhiyun 
/* GDBELL (guest doorbell, presumably — confirm) exceptions have no dedicated
 * save area; they reuse the generic PACA_EXGEN exception frame. */
36*4882a593Smuzhiyun #define PACA_EXGDBELL PACA_EXGEN
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun /* We are out of SPRGs so we save some things in the PACA. The normal
39*4882a593Smuzhiyun  * exception frame is smaller than the CRIT or MC one though
40*4882a593Smuzhiyun  */
/* Offsets of the 8-byte register save slots within the "normal" PACA
 * exception frame. */
41*4882a593Smuzhiyun #define EX_R1		(0 * 8)
42*4882a593Smuzhiyun #define EX_CR		(1 * 8)
43*4882a593Smuzhiyun #define EX_R10		(2 * 8)
44*4882a593Smuzhiyun #define EX_R11		(3 * 8)
45*4882a593Smuzhiyun #define EX_R14		(4 * 8)
46*4882a593Smuzhiyun #define EX_R15		(5 * 8)
/* 6 slots * 8 bytes = 48-byte normal exception frame */
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun /*
49*4882a593Smuzhiyun  * The TLB miss exception uses different slots.
50*4882a593Smuzhiyun  *
51*4882a593Smuzhiyun  * The bolted variant uses only the first six fields,
52*4882a593Smuzhiyun  * which in combination with pgd and kernel_pgd fits in
53*4882a593Smuzhiyun  * one 64-byte cache line.
54*4882a593Smuzhiyun  */
55*4882a593Smuzhiyun 
/* Offsets of the 8-byte save slots within a TLB miss exception frame. */
56*4882a593Smuzhiyun #define EX_TLB_R10	( 0 * 8)
57*4882a593Smuzhiyun #define EX_TLB_R11	( 1 * 8)
58*4882a593Smuzhiyun #define EX_TLB_R14	( 2 * 8)
59*4882a593Smuzhiyun #define EX_TLB_R15	( 3 * 8)
60*4882a593Smuzhiyun #define EX_TLB_R16	( 4 * 8)
61*4882a593Smuzhiyun #define EX_TLB_CR	( 5 * 8) /* last of the six slots used by the bolted variant */
62*4882a593Smuzhiyun #define EX_TLB_R12	( 6 * 8)
63*4882a593Smuzhiyun #define EX_TLB_R13	( 7 * 8)
64*4882a593Smuzhiyun #define EX_TLB_DEAR	( 8 * 8) /* Level 0 and 2 only */
65*4882a593Smuzhiyun #define EX_TLB_ESR	( 9 * 8) /* Level 0 and 2 only */
66*4882a593Smuzhiyun #define EX_TLB_SRR0	(10 * 8)
67*4882a593Smuzhiyun #define EX_TLB_SRR1	(11 * 8)
68*4882a593Smuzhiyun #define EX_TLB_R7	(12 * 8)
69*4882a593Smuzhiyun #define EX_TLB_SIZE	(13 * 8) /* total frame size: 13 slots * 8 = 104 bytes */
70*4882a593Smuzhiyun 
/* Emit the global entry label exc_<label>_book3e for an exception handler. */
71*4882a593Smuzhiyun #define	START_EXCEPTION(label)						\
72*4882a593Smuzhiyun 	.globl exc_##label##_book3e;					\
73*4882a593Smuzhiyun exc_##label##_book3e:
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun /* TLB miss exception prolog
76*4882a593Smuzhiyun  *
77*4882a593Smuzhiyun  * This prolog handles re-entrancy (up to 3 levels supported in the PACA
78*4882a593Smuzhiyun  * though we currently don't test for overflow). It provides you with a
79*4882a593Smuzhiyun  * re-entrancy safe working space of r10...r16 and CR with r12 being used
80*4882a593Smuzhiyun  * as the exception area pointer in the PACA for that level of re-entrancy
81*4882a593Smuzhiyun  * and r13 containing the PACA pointer.
82*4882a593Smuzhiyun  *
83*4882a593Smuzhiyun  * SRR0 and SRR1 are saved, but DEAR and ESR are not, since they don't apply
84*4882a593Smuzhiyun  * as-is for instruction exceptions. It's up to the actual exception code
85*4882a593Smuzhiyun  * to save them as well if required.
86*4882a593Smuzhiyun  */
/* Saves r10-r16, CR and SRR0/1 into the current TLB exception frame, then
 * advances SPRN_SPRG_TLB_EXFRAME past this frame so a nested miss gets a
 * fresh one.  Loads and stores are interleaved; the ordering below is
 * deliberate, as each mfspr/mfcr targets a register just freed by a std. */
87*4882a593Smuzhiyun #define TLB_MISS_PROLOG							    \
88*4882a593Smuzhiyun 	mtspr	SPRN_SPRG_TLB_SCRATCH,r12;	/* stash r12 in the scratch SPRG */ \
89*4882a593Smuzhiyun 	mfspr	r12,SPRN_SPRG_TLB_EXFRAME;	/* r12 = this level's save area */  \
90*4882a593Smuzhiyun 	std	r10,EX_TLB_R10(r12);					    \
91*4882a593Smuzhiyun 	mfcr	r10;				/* r10 = CR, stored below */	    \
92*4882a593Smuzhiyun 	std	r11,EX_TLB_R11(r12);					    \
93*4882a593Smuzhiyun 	mfspr	r11,SPRN_SPRG_TLB_SCRATCH;	/* r11 = original r12 */	    \
94*4882a593Smuzhiyun 	std	r13,EX_TLB_R13(r12);					    \
95*4882a593Smuzhiyun 	mfspr	r13,SPRN_SPRG_PACA;		/* r13 = PACA pointer */	    \
96*4882a593Smuzhiyun 	std	r14,EX_TLB_R14(r12);					    \
97*4882a593Smuzhiyun 	addi	r14,r12,EX_TLB_SIZE;		/* r14 = next level's frame */	    \
98*4882a593Smuzhiyun 	std	r15,EX_TLB_R15(r12);					    \
99*4882a593Smuzhiyun 	mfspr	r15,SPRN_SRR1;						    \
100*4882a593Smuzhiyun 	std	r16,EX_TLB_R16(r12);					    \
101*4882a593Smuzhiyun 	mfspr	r16,SPRN_SRR0;						    \
102*4882a593Smuzhiyun 	std	r10,EX_TLB_CR(r12);		/* store the CR saved above */	    \
103*4882a593Smuzhiyun 	std	r11,EX_TLB_R12(r12);		/* store the original r12 */	    \
104*4882a593Smuzhiyun 	mtspr	SPRN_SPRG_TLB_EXFRAME,r14;	/* "push": advance frame ptr */	    \
105*4882a593Smuzhiyun 	std	r15,EX_TLB_SRR1(r12);					    \
106*4882a593Smuzhiyun 	std	r16,EX_TLB_SRR0(r12);
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun /* And these are the matching epilogs that restore things
109*4882a593Smuzhiyun  *
110*4882a593Smuzhiyun  * There are 3 epilogs:
111*4882a593Smuzhiyun  *
112*4882a593Smuzhiyun  * - SUCCESS       : Unwinds one level
113*4882a593Smuzhiyun  * - ERROR         : restore from level 0 and reset
114*4882a593Smuzhiyun  * - ERROR_SPECIAL : restore from current level and reset
115*4882a593Smuzhiyun  *
116*4882a593Smuzhiyun  * Normal errors use ERROR, that is, they restore the initial fault context
117*4882a593Smuzhiyun  * and trigger a fault. However, there is a special case for linear mapping
118*4882a593Smuzhiyun  * errors. Those should basically never happen, but if they do happen, we
119*4882a593Smuzhiyun  * want the error to point out the context that did that linear mapping
120*4882a593Smuzhiyun  * fault, not the initial level 0 (basically, we got a bogus PGF or something
121*4882a593Smuzhiyun  * like that). For userland errors on the linear mapping, there is no
122*4882a593Smuzhiyun  * difference since those are always level 0 anyway
123*4882a593Smuzhiyun  */
124*4882a593Smuzhiyun 
/* Restores all state saved by TLB_MISS_PROLOG from the frame pointed to by
 * r12, and resets SPRN_SPRG_TLB_EXFRAME to 'freg': passing r12 pops exactly
 * one level (the frame being restored becomes the top again); passing a
 * level-0 base resets the whole stack.  r12 itself is reloaded last since
 * it is the frame pointer for every other load. */
125*4882a593Smuzhiyun #define TLB_MISS_RESTORE(freg)						    \
126*4882a593Smuzhiyun 	ld	r14,EX_TLB_CR(r12);		/* r14 = saved CR */		    \
127*4882a593Smuzhiyun 	ld	r10,EX_TLB_R10(r12);					    \
128*4882a593Smuzhiyun 	ld	r15,EX_TLB_SRR0(r12);		/* r15 = saved SRR0 */		    \
129*4882a593Smuzhiyun 	ld	r16,EX_TLB_SRR1(r12);		/* r16 = saved SRR1 */		    \
130*4882a593Smuzhiyun 	mtspr	SPRN_SPRG_TLB_EXFRAME,freg;	/* reset re-entrancy frame ptr */   \
131*4882a593Smuzhiyun 	ld	r11,EX_TLB_R11(r12);					    \
132*4882a593Smuzhiyun 	mtcr	r14;				/* restore CR */		    \
133*4882a593Smuzhiyun 	ld	r13,EX_TLB_R13(r12);					    \
134*4882a593Smuzhiyun 	ld	r14,EX_TLB_R14(r12);					    \
135*4882a593Smuzhiyun 	mtspr	SPRN_SRR0,r15;			/* restore SRR0 */		    \
136*4882a593Smuzhiyun 	ld	r15,EX_TLB_R15(r12);					    \
137*4882a593Smuzhiyun 	mtspr	SPRN_SRR1,r16;			/* restore SRR1 */		    \
138*4882a593Smuzhiyun 	ld	r16,EX_TLB_R16(r12);					    \
139*4882a593Smuzhiyun 	ld	r12,EX_TLB_R12(r12);		/* r12 last: it was the frame ptr */ \
140*4882a593Smuzhiyun 
/* Unwind one level: restore from the current frame (r12) and make it the
 * top of the re-entrancy stack again. */
141*4882a593Smuzhiyun #define TLB_MISS_EPILOG_SUCCESS						    \
142*4882a593Smuzhiyun 	TLB_MISS_RESTORE(r12)
143*4882a593Smuzhiyun 
/* Restore from level 0 and reset: point r12 at the level-0 TLB frame in the
 * PACA, restore the original fault context from it, and reset the
 * re-entrancy stack to that frame. */
144*4882a593Smuzhiyun #define TLB_MISS_EPILOG_ERROR						    \
145*4882a593Smuzhiyun 	addi	r12,r13,PACA_EXTLB;	/* r12 = level-0 frame (r13 = PACA) */	    \
146*4882a593Smuzhiyun 	TLB_MISS_RESTORE(r12)
147*4882a593Smuzhiyun 
/* Restore from the CURRENT level's frame (r12) — so the fault reports the
 * context that took the linear-mapping miss — while still resetting the
 * re-entrancy stack back to level 0 (r11). */
148*4882a593Smuzhiyun #define TLB_MISS_EPILOG_ERROR_SPECIAL					    \
149*4882a593Smuzhiyun 	addi	r11,r13,PACA_EXTLB;	/* r11 = level-0 frame (r13 = PACA) */	    \
150*4882a593Smuzhiyun 	TLB_MISS_RESTORE(r11)
151*4882a593Smuzhiyun 
/*
 * Program IVOR<vector_number> with interrupt_base_book3e + vector_offset.
 * The offset is merged in with ori, so this relies on the low 16 bits of
 * interrupt_base_book3e being clear — NOTE(review): presumably guaranteed
 * by the base's alignment; confirm at the definition site.  Clobbers r3.
 */
152*4882a593Smuzhiyun #define SET_IVOR(vector_number, vector_offset)	\
153*4882a593Smuzhiyun 	LOAD_REG_ADDR(r3,interrupt_base_book3e);\
154*4882a593Smuzhiyun 	ori	r3,r3,vector_offset@l;		\
155*4882a593Smuzhiyun 	mtspr	SPRN_IVOR##vector_number,r3;
156*4882a593Smuzhiyun /*
157*4882a593Smuzhiyun  * powerpc relies on return from interrupt/syscall being context synchronising
158*4882a593Smuzhiyun  * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
159*4882a593Smuzhiyun  * synchronisation instructions.
160*4882a593Smuzhiyun  */
/* On Book3E a plain rfi is context synchronising, so no extra flush or
 * barrier sequence is needed for either destination context. */
161*4882a593Smuzhiyun #define RFI_TO_KERNEL							\
162*4882a593Smuzhiyun 	rfi
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun #define RFI_TO_USER							\
165*4882a593Smuzhiyun 	rfi
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
168*4882a593Smuzhiyun 
169