/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Vector Instructions
 *
 * Assembler macros to generate .byte/.word code for particular
 * vector instructions that are supported only by recent binutils
 * (>= 2.26).
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef __ASM_S390_VX_INSN_H
#define __ASM_S390_VX_INSN_H

#ifdef __ASSEMBLY__


/* Macros to generate vector instruction byte code */

/* GR_NUM - Retrieve general-purpose register number
 *
 * @opd:	Operand to store register number
 * @gr:		String designation of the register in the format "%rN"
 */
.macro	GR_NUM	opd gr
	\opd = 255
	.ifc \gr,%r0
		\opd = 0
	.endif
	.ifc \gr,%r1
		\opd = 1
	.endif
	.ifc \gr,%r2
		\opd = 2
	.endif
	.ifc \gr,%r3
		\opd = 3
	.endif
	.ifc \gr,%r4
		\opd = 4
	.endif
	.ifc \gr,%r5
		\opd = 5
	.endif
	.ifc \gr,%r6
		\opd = 6
	.endif
	.ifc \gr,%r7
		\opd = 7
	.endif
	.ifc \gr,%r8
		\opd = 8
	.endif
	.ifc \gr,%r9
		\opd = 9
	.endif
	.ifc \gr,%r10
		\opd = 10
	.endif
	.ifc \gr,%r11
		\opd = 11
	.endif
	.ifc \gr,%r12
		\opd = 12
	.endif
	.ifc \gr,%r13
		\opd = 13
	.endif
	.ifc \gr,%r14
		\opd = 14
	.endif
	.ifc \gr,%r15
		\opd = 15
	.endif
	.if \opd == 255
		\opd = \gr
	.endif
.endm
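
/*
 * Illustrative use (not part of the original interface):
 *
 *	GR_NUM	b2, %r11	sets the symbol b2 to 11
 *
 * If the argument matches none of the %r0-%r15 strings, the final .if
 * assigns the argument itself, so a plain number such as 11 works too.
 */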

/* VX_NUM - Retrieve vector register number
 *
 * @opd:	Operand to store register number
 * @vxr:	String designation of the register in the format "%vN"
 *
 * The vector register number is used as an input to the instruction
 * and to compute the RXB field of the instruction.
 */
.macro	VX_NUM	opd vxr
	\opd = 255
	.ifc \vxr,%v0
		\opd = 0
	.endif
	.ifc \vxr,%v1
		\opd = 1
	.endif
	.ifc \vxr,%v2
		\opd = 2
	.endif
	.ifc \vxr,%v3
		\opd = 3
	.endif
	.ifc \vxr,%v4
		\opd = 4
	.endif
	.ifc \vxr,%v5
		\opd = 5
	.endif
	.ifc \vxr,%v6
		\opd = 6
	.endif
	.ifc \vxr,%v7
		\opd = 7
	.endif
	.ifc \vxr,%v8
		\opd = 8
	.endif
	.ifc \vxr,%v9
		\opd = 9
	.endif
	.ifc \vxr,%v10
		\opd = 10
	.endif
	.ifc \vxr,%v11
		\opd = 11
	.endif
	.ifc \vxr,%v12
		\opd = 12
	.endif
	.ifc \vxr,%v13
		\opd = 13
	.endif
	.ifc \vxr,%v14
		\opd = 14
	.endif
	.ifc \vxr,%v15
		\opd = 15
	.endif
	.ifc \vxr,%v16
		\opd = 16
	.endif
	.ifc \vxr,%v17
		\opd = 17
	.endif
	.ifc \vxr,%v18
		\opd = 18
	.endif
	.ifc \vxr,%v19
		\opd = 19
	.endif
	.ifc \vxr,%v20
		\opd = 20
	.endif
	.ifc \vxr,%v21
		\opd = 21
	.endif
	.ifc \vxr,%v22
		\opd = 22
	.endif
	.ifc \vxr,%v23
		\opd = 23
	.endif
	.ifc \vxr,%v24
		\opd = 24
	.endif
	.ifc \vxr,%v25
		\opd = 25
	.endif
	.ifc \vxr,%v26
		\opd = 26
	.endif
	.ifc \vxr,%v27
		\opd = 27
	.endif
	.ifc \vxr,%v28
		\opd = 28
	.endif
	.ifc \vxr,%v29
		\opd = 29
	.endif
	.ifc \vxr,%v30
		\opd = 30
	.endif
	.ifc \vxr,%v31
		\opd = 31
	.endif
	.if \opd == 255
		\opd = \vxr
	.endif
.endm
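
/*
 * Illustrative use:
 *
 *	VX_NUM	v1, %v17	sets the symbol v1 to 17
 *
 * Only the low four bits (17 & 15 = 1) fit into the instruction's
 * 4-bit register field; the fifth bit is carried in the RXB field
 * computed by the RXB macro below.
 */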

/* RXB - Compute the RXB field from the most significant bits of the
 * vector register numbers
 *
 * @rxb:	Operand to store computed RXB value
 * @v1:		First vector register designated operand
 * @v2:		Second vector register designated operand
 * @v3:		Third vector register designated operand
 * @v4:		Fourth vector register designated operand
 */
.macro	RXB	rxb v1 v2=0 v3=0 v4=0
	\rxb = 0
	.if \v1 & 0x10
		\rxb = \rxb | 0x08
	.endif
	.if \v2 & 0x10
		\rxb = \rxb | 0x04
	.endif
	.if \v3 & 0x10
		\rxb = \rxb | 0x02
	.endif
	.if \v4 & 0x10
		\rxb = \rxb | 0x01
	.endif
.endm
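
/*
 * Worked example (illustrative): for v1 = 17 (%v17) and v2 = 3 (%v3),
 * only v1 has bit 0x10 set, so
 *
 *	RXB	rxb, 17, 3
 *
 * yields rxb = 0x08, i.e. only the first register field is extended.
 */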

/* MRXB - Generate Element Size Control and RXB value
 *
 * @m:		Element size control
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXB	m v1 v2=0 v3=0 v4=0
	rxb = 0
	RXB	rxb, \v1, \v2, \v3, \v4
	.byte	(\m << 4) | rxb
.endm
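
/*
 * Illustrative: with element size control m = 3 (doubleword) and %v17
 * as the only register operand, MRXB emits (3 << 4) | 0x08 = 0x38.
 */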

/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
 *
 * @m:		Element size control
 * @opc:	Opcode
 * @v1:		First vector register designated operand (for RXB)
 * @v2:		Second vector register designated operand (for RXB)
 * @v3:		Third vector register designated operand (for RXB)
 * @v4:		Fourth vector register designated operand (for RXB)
 */
.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
	MRXB	\m, \v1, \v2, \v3, \v4
	.byte	\opc
.endm
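
/*
 * Illustrative: the instruction macros below emit two .word values and
 * then use MRXBOPC for the last two bytes. For example, VGBM %v1,0xFFFF
 * (see below) assembles to 0xE7 0x10 0xFF 0xFF 0x00 0x44.
 */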

/* Vector support instructions */

/* VECTOR GENERATE BYTE MASK */
.macro	VGBM	vr imm2
	VX_NUM	v1, \vr
	.word	(0xE700 | ((v1&15) << 4))
	.word	\imm2
	MRXBOPC	0, 0x44, v1
.endm
.macro	VZERO	vxr
	VGBM	\vxr, 0
.endm
.macro	VONE	vxr
	VGBM	\vxr, 0xFFFF
.endm
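
/*
 * Typical use (illustrative):
 *
 *	VZERO	%v24		clear all 128 bits of %v24
 *	VONE	%v25		set all 128 bits of %v25
 *
 * Each of the 16 immediate mask bits selects 0x00 or 0xFF for the
 * corresponding byte of the result.
 */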

/* VECTOR LOAD VR ELEMENT FROM GR */
.macro	VLVG	v, gr, disp, m
	VX_NUM	v1, \v
	GR_NUM	b2, "%r0"
	GR_NUM	r3, \gr
	.word	0xE700 | ((v1&15) << 4) | r3
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x22, v1
.endm
.macro	VLVGB	v, gr, index
	VLVG	\v, \gr, \index, 0
.endm
.macro	VLVGH	v, gr, index
	VLVG	\v, \gr, \index, 1
.endm
.macro	VLVGF	v, gr, index
	VLVG	\v, \gr, \index, 2
.endm
.macro	VLVGG	v, gr, index
	VLVG	\v, \gr, \index, 3
.endm
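
/*
 * Typical use (illustrative): load a GR into one element of a VR; the
 * displacement selects the element, the suffix selects the element size:
 *
 *	VLVGG	%v16, %r2, 0	%r2 into doubleword element 0 of %v16
 *	VLVGF	%v16, %r3, 3	%r3 into word element 3 of %v16
 */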

/* VECTOR LOAD REGISTER */
.macro	VLR	v1, v2
	VX_NUM	v1, \v1
	VX_NUM	v2, \v2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0
	MRXBOPC	0, 0x56, v1, v2
.endm

/* VECTOR LOAD */
.macro	VL	v, disp, index="%r0", base
	VX_NUM	v1, \v
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC 0, 0x06, v1
.endm
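
/*
 * Typical use (illustrative); the index register defaults to %r0,
 * which means "no index":
 *
 *	VL	%v20, 0, %r0, %r5	load %v20 from 0(%r5)
 *	VL	%v21, 16, %r1, %r5	load %v21 from 16(%r1,%r5)
 */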

/* VECTOR LOAD ELEMENT */
.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
	VX_NUM	v1, \vr1
	GR_NUM	x2, \index
	GR_NUM	b2, \base
	.word	0xE700 | ((v1&15) << 4) | x2
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEB	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
.endm
.macro	VLEH	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
.endm
.macro	VLEF	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
.endm
.macro	VLEG	vr1, disp, index="%r0", base, m3
	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
.endm

/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro	VLEIx	vr1, imm2, m3, opc
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, \opc, v1
.endm
.macro	VLEIB	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x40
.endm
.macro	VLEIH	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x41
.endm
.macro	VLEIF	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x43
.endm
.macro	VLEIG	vr1, imm2, index
	VLEIx	\vr1, \imm2, \index, 0x42
.endm

/* VECTOR LOAD GR FROM VR ELEMENT */
.macro	VLGV	gr, vr, disp, base="%r0", m
	GR_NUM	r1, \gr
	GR_NUM	b2, \base
	VX_NUM	v3, \vr
	.word	0xE700 | (r1 << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\m, 0x21, v3
.endm
.macro	VLGVB	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 0
.endm
.macro	VLGVH	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 1
.endm
.macro	VLGVF	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 2
.endm
.macro	VLGVG	gr, vr, disp, base="%r0"
	VLGV	\gr, \vr, \disp, \base, 3
.endm
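
/*
 * Typical use (illustrative): extract one element of a VR into a GR:
 *
 *	VLGVB	%r2, %v16, 7	byte element 7 of %v16 into %r2
 *	VLGVG	%r3, %v16, 1	doubleword element 1 of %v16 into %r3
 */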

/* VECTOR LOAD MULTIPLE */
.macro	VLM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	    /* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x36, v1, v3
.endm

/* VECTOR STORE MULTIPLE */
.macro	VSTM	vfrom, vto, disp, base, hint=3
	VX_NUM	v1, \vfrom
	VX_NUM	v3, \vto
	GR_NUM	b2, \base	    /* Base register */
	.word	0xE700 | ((v1&15) << 4) | (v3&15)
	.word	(b2 << 12) | (\disp)
	MRXBOPC	\hint, 0x3E, v1, v3
.endm
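
/*
 * Typical use (illustrative): spill and reload a contiguous range of
 * vector registers, here with %r1 pointing to a 128-byte buffer:
 *
 *	VSTM	%v8, %v15, 0, %r1	store %v8-%v15 at 0(%r1)
 *	...
 *	VLM	%v8, %v15, 0, %r1	reload %v8-%v15 from 0(%r1)
 */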

/* VECTOR PERMUTE */
.macro	VPERM	vr1, vr2, vr3, vr4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	(v4&15), 0x8C, v1, v2, v3, v4
.endm
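
/*
 * Illustrative: each byte of the fourth operand selects one of the 32
 * source bytes, 0-15 from the second operand and 16-31 from the third.
 * For example, with the mask bytes 0x0f, 0x0e, ..., 0x00 in %v31,
 *
 *	VPERM	%v1, %v2, %v2, %v31
 *
 * reverses the byte order of %v2 into %v1.
 */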

/* VECTOR UNPACK LOGICAL LOW */
.macro	VUPLL	vr1, vr2, m3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	0x0000
	MRXBOPC	\m3, 0xD4, v1, v2
.endm
.macro	VUPLLB	vr1, vr2
	VUPLL	\vr1, \vr2, 0
.endm
.macro	VUPLLH	vr1, vr2
	VUPLL	\vr1, \vr2, 1
.endm
.macro	VUPLLF	vr1, vr2
	VUPLL	\vr1, \vr2, 2
.endm


/* Vector integer instructions */

/* VECTOR AND */
.macro	VN	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x68, v1, v2, v3
.endm

/* VECTOR EXCLUSIVE OR */
.macro	VX	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x6D, v1, v2, v3
.endm

/* VECTOR GALOIS FIELD MULTIPLY SUM */
.macro	VGFM	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xB4, v1, v2, v3
.endm
.macro	VGFMB	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 0
.endm
.macro	VGFMH	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 1
.endm
.macro	VGFMF	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 2
.endm
.macro	VGFMG	vr1, vr2, vr3
	VGFM	\vr1, \vr2, \vr3, 3
.endm

/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
.macro	VGFMA	vr1, vr2, vr3, vr4, m5
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	VX_NUM	v4, \vr4
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12) | (\m5 << 8)
	MRXBOPC	(v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro	VGFMAB	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
.endm
.macro	VGFMAH	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
.endm
.macro	VGFMAF	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
.endm
.macro	VGFMAG	vr1, vr2, vr3, vr4
	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
.endm

/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
.macro	VSRLB	vr1, vr2, vr3
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	0, 0x7D, v1, v2, v3
.endm

/* VECTOR REPLICATE IMMEDIATE */
.macro	VREPI	vr1, imm2, m3
	VX_NUM	v1, \vr1
	.word	0xE700 | ((v1&15) << 4)
	.word	\imm2
	MRXBOPC	\m3, 0x45, v1
.endm
.macro	VREPIB	vr1, imm2
	VREPI	\vr1, \imm2, 0
.endm
.macro	VREPIH	vr1, imm2
	VREPI	\vr1, \imm2, 1
.endm
.macro	VREPIF	vr1, imm2
	VREPI	\vr1, \imm2, 2
.endm
.macro	VREPIG	vr1, imm2
	VREPI	\vr1, \imm2, 3
.endm

/* VECTOR ADD */
.macro	VA	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC	\m4, 0xF3, v1, v2, v3
.endm
.macro	VAB	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 0
.endm
.macro	VAH	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 1
.endm
.macro	VAF	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 2
.endm
.macro	VAG	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 3
.endm
.macro	VAQ	vr1, vr2, vr3
	VA	\vr1, \vr2, \vr3, 4
.endm
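
/*
 * Typical use (illustrative); the suffix selects the element size:
 *
 *	VAB	%v1, %v2, %v3	sixteen independent byte adds
 *	VAQ	%v1, %v2, %v3	one 128-bit add: %v1 = %v2 + %v3
 */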

/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
.macro	VESRAV	vr1, vr2, vr3, m4
	VX_NUM	v1, \vr1
	VX_NUM	v2, \vr2
	VX_NUM	v3, \vr3
	.word	0xE700 | ((v1&15) << 4) | (v2&15)
	.word	((v3&15) << 12)
	MRXBOPC \m4, 0x7A, v1, v2, v3
.endm

.macro	VESRAVB	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 0
.endm
.macro	VESRAVH	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 1
.endm
.macro	VESRAVF	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 2
.endm
.macro	VESRAVG	vr1, vr2, vr3
	VESRAV	\vr1, \vr2, \vr3, 3
.endm
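
/*
 * Typical use (illustrative): shift each element of the second operand
 * right arithmetically by the amount given in the corresponding
 * element of the third operand:
 *
 *	VESRAVG	%v1, %v2, %v3	per-doubleword variable shifts
 */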

#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_S390_VX_INSN_H */