xref: /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/sstep.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #include <asm/inst.h>
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun struct pt_regs;
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun /*
10*4882a593Smuzhiyun  * We don't allow single-stepping an mtmsrd that would clear
11*4882a593Smuzhiyun  * MSR_RI, since that would make the exception unrecoverable.
12*4882a593Smuzhiyun  * Since we need to single-step to proceed from a breakpoint,
13*4882a593Smuzhiyun  * we don't allow putting a breakpoint on an mtmsrd instruction.
14*4882a593Smuzhiyun  * Similarly we don't allow breakpoints on rfid instructions.
15*4882a593Smuzhiyun  * These macros tell us if an instruction is a mtmsrd or rfid.
16*4882a593Smuzhiyun  * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
17*4882a593Smuzhiyun  * and an mtmsrd (64-bit).
18*4882a593Smuzhiyun  */
/* mtmsr[d]: primary opcode 31; the 0x40 bit is masked out so both the
 * mtmsr (ext op 146) and mtmsrd (ext op 178) encodings match. */
#define IS_MTMSRD(instr)	((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
/* rfid: primary opcode 19, extended opcode 18 */
#define IS_RFID(instr)		((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024)
/* rfi: primary opcode 19, extended opcode 50 */
#define IS_RFI(instr)		((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064)
22*4882a593Smuzhiyun 
/*
 * Instruction classes returned by analyse_instr() in the low bits of
 * instruction_op.type (extracted with GETTYPE / INSTR_TYPE_MASK below).
 */
enum instruction_type {
	COMPUTE,		/* arith/logical/CR op, etc. */
	LOAD,			/* load and store types need to be contiguous */
	LOAD_MULTI,		/* load multiple (lmw-style) */
	LOAD_FP,		/* floating-point load */
	LOAD_VMX,		/* VMX (Altivec) load */
	LOAD_VSX,		/* VSX load */
	STORE,
	STORE_MULTI,		/* store multiple (stmw-style) */
	STORE_FP,
	STORE_VMX,
	STORE_VSX,
	LARX,			/* load and reserve */
	STCX,			/* store conditional */
	BRANCH,
	MFSPR,
	MTSPR,
	CACHEOP,		/* subtype encoded in the CACHEOP_MASK bits */
	BARRIER,		/* subtype encoded in the BARRIER_MASK bits */
	SYSCALL,
	SYSCALL_VECTORED_0,
	MFMSR,
	MTMSR,
	RFI,
	INTERRUPT,
	UNKNOWN
};
50*4882a593Smuzhiyun 
/* Mask extracting the instruction class from a type word */
#define INSTR_TYPE_MASK	0x1f

/* These range checks rely on LOAD..STCX being contiguous in the enum above */
#define OP_IS_LOAD(type)	((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX)
#define OP_IS_STORE(type)	((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX)
#define OP_IS_LOAD_STORE(type)	(LOAD <= (type) && (type) <= STCX)
56*4882a593Smuzhiyun 
/* Compute flags, ORed in with type */
#define SETREG		0x20	/* result value (op->val) goes to op->reg */
#define SETCC		0x40	/* updates CR; new value in op->ccval */
#define SETXER		0x80	/* updates XER; new value in op->xerval */

/* Branch flags, ORed in with type */
#define SETLK		0x20	/* LK bit set: link register is updated */
#define BRTAKEN		0x40	/* conditional branch is taken */
#define DECCTR		0x80	/* branch decrements CTR */

/* Load/store flags, ORed in with type */
#define SIGNEXT		0x20	/* sign-extend the loaded value */
#define UPDATE		0x40	/* matches bit in opcode 31 instructions */
#define BYTEREV		0x80	/* byte-reversed access */
#define FPCONV		0x100	/* single<->double conversion on FP load/store */

/* Barrier type field, ORed in with type */
#define BARRIER_MASK	0xe0
#define BARRIER_SYNC	0x00
#define BARRIER_ISYNC	0x20
#define BARRIER_EIEIO	0x40
#define BARRIER_LWSYNC	0x60
#define BARRIER_PTESYNC	0x80

/* Cacheop values, ORed in with type */
#define CACHEOP_MASK	0x700
#define DCBST		0
#define DCBF		0x100
#define DCBTST		0x200
#define DCBT		0x300
#define ICBI		0x400
#define DCBZ		0x500

/* VSX flags values */
#define VSX_FPCONV	1	/* do floating point SP/DP conversion */
#define VSX_SPLAT	2	/* store loaded value into all elements */
#define VSX_LDLEFT	4	/* load VSX register from left */
#define VSX_CHECK_VEC	8	/* check MSR_VEC not MSR_VSX for reg >= 32 */

/* Prefixed flag, ORed in with type: instruction is an 8-byte prefixed insn */
#define PREFIXED       0x800

/* Size field in type word: operand size in bytes, held in bits 12 and up */
#define SIZE(n)		((n) << 12)
#define GETSIZE(w)	((w) >> 12)

/* Extract the instruction class / total instruction length from a type word */
#define GETTYPE(t)	((t) & INSTR_TYPE_MASK)
#define GETLENGTH(t)   (((t) & PREFIXED) ? 8 : 4)

/* Build a type word from class, flags and operand size */
#define MKOP(t, f, s)	((t) | (f) | SIZE(s))

/* Prefix instruction operands: RA and R fields of the 32-bit prefix word */
#define GET_PREFIX_RA(i)	(((i) >> 16) & 0x1f)
#define GET_PREFIX_R(i)		((i) & (1ul << 20))

/* Code-patching site label (defined elsewhere, presumably in asm/patching
 * code) — NOTE(review): its role is not evident from this header; confirm
 * against its definition before relying on it. */
extern s32 patch__exec_instr;
113*4882a593Smuzhiyun 
/*
 * Decoded-instruction descriptor filled in by analyse_instr() and
 * consumed by the emulate_*() functions below.
 */
struct instruction_op {
	int type;		/* class | flags | SIZE(); see macros above */
	int reg;		/* register operand number */
	unsigned long val;	/* immediate/result/store value, as applicable */
	/* For LOAD/STORE/LARX/STCX */
	unsigned long ea;	/* effective address of the memory access */
	int update_reg;		/* register receiving the EA for update forms */
	/* For MFSPR */
	int spr;
	u32 ccval;		/* new CR value (valid when SETCC is set) */
	u32 xerval;		/* new XER value (valid when SETXER is set) */
	u8 element_size;	/* for VSX/VMX loads/stores */
	u8 vsx_flags;		/* VSX_* flag bits above */
};
128*4882a593Smuzhiyun 
/*
 * Overlapping views of a 128-bit vector register, used when
 * emulating VSX/VMX loads and stores (see emulate_vsx_load/store).
 */
union vsx_reg {
	u8	b[16];		/* byte view */
	u16	h[8];		/* halfword view */
	u32	w[4];		/* word view */
	unsigned long d[2];	/* long-sized element view */
	float	fp[4];		/* single-precision FP view */
	double	dp[2];		/* double-precision FP view */
	__vector128 v;		/* whole-register view */
};
138*4882a593Smuzhiyun 
/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 */
extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
			 struct ppc_inst instr);

/*
 * Emulate an instruction that can be executed just by updating
 * fields in *regs (i.e. one for which analyse_instr() returned 1).
 */
void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);

/*
 * Emulate instructions that cause a transfer of control,
 * arithmetic/logical instructions, loads and stores,
 * cache operations and barriers.
 *
 * Returns 1 if the instruction was emulated successfully,
 * 0 if it could not be emulated, or -1 for an instruction that
 * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
 */
extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr);

/*
 * Emulate a load or store instruction (described by *op) by
 * reading/writing the memory of the current process.  FP/VMX/VSX
 * registers are assumed to hold live values if the appropriate
 * enable bit in regs->msr is set; otherwise this will use the saved
 * values in the thread struct for user-mode accesses.
 */
extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);

/* Unpack a VSX load from *mem into *reg / pack *reg into *mem for a store,
 * honouring op->element_size, op->vsx_flags and endianness. */
extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
			     const void *mem, bool cross_endian);
extern void emulate_vsx_store(struct instruction_op *op,
			      const union vsx_reg *reg, void *mem,
			      bool cross_endian);
extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);
183