/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 */

#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km	0
#define kr	(16*4)
#define rr	((16*4)+16)

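/*
 * These offsets mirror struct cast5_ctx: 16 32-bit masking subkeys (Km)
 * at offset 0, 16 byte-sized rotation subkeys (Kr) at offset 64, and the
 * reduced-rounds flag (rr) at offset 80.  rr is nonzero for keys of
 * 80 bits or less, which use the 12-round variant of CAST5 (RFC 2144).
 */
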
/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %r15

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

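/*
 * General-purpose registers for the scalar s-box lookups: RID1/RID2 hold
 * byte indices, RGI1..RGI4 receive the 64-bit values extracted from the
 * vector registers, and RFS1..RFS3 accumulate the 32-bit lookup results.
 */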
#define RID1  %rdi
#define RID1d %edi
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d

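/*
 * Do the four s-box lookups of the CAST5 round function for one 32-bit
 * value in 'src': the bh/bl sub-registers index s1/s2 from the low 16
 * bits, then 'src' is shifted down so the next byte pair indexes s3/s4.
 * op1..op3 select the xor/add/sub combining sequence of the round
 * function type; interleave_op lets the shift of the other input overlap.
 */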
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

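/*
 * The round function is split in two halves.  F_head combines the masking
 * key RKM with the four 32-bit inputs in 'a' (op0 is add/xor/sub depending
 * on the round type), applies the key-dependent rotation as a left/right
 * shift pair (RKRF holds the rotate count, RKRR holds 32 - count), and
 * extracts the result into two general-purpose registers.  F_tail runs
 * the scalar s-box lookups and reassembles the results into 'x'.
 */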
#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;                 \
	vpslld	RKRF,	x,    RTMP;              \
	vpsrld	RKRR,	x,    x;                 \
	vpor	RTMP,	x,    x;                 \
	\
	vmovq		x,    gi1;               \
	vpextrq $1,	x,    gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS2;                                      \
	orq		RFS1, RFS2;                                \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS1;                                      \
	orq		RFS1, RFS3;                                \
	\
	vmovq		RFS2, x;                                   \
	vpinsrq $1,	RFS3, x, x;

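/*
 * F_2 evaluates the round function on two register pairs at once and xors
 * the result into the opposite halves.  The three op sequences implement
 * the CAST5 round function types f1, f2 and f3 of RFC 2144; e.g. f1 adds
 * the masking key (vpaddd) and combines the s-boxes as ((S1 ^ S2) - S3) + S4.
 */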
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);              \
	F_head(b2, RX, RGI3, RGI4, op0);              \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
	\
	vpxor		a1, RX,   a1;                 \
	vpxor		a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define subround(a1, b1, a2, b2, f) \
	F ## f ## _2(a1, b1, a2, b2);

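/*
 * One full round over all 16 blocks: broadcast masking key Km[n], mask
 * the low five bits of the current rotation key byte into RKRF (the
 * left-shift count), derive the matching right-shift count 32 - Kr in
 * RKRR, and shift RKR down a byte so the next round sees the next key.
 */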
#define round(l, r, n, f) \
	vbroadcastss 	(km+(4*n))(CTX), RKM;        \
	vpand		R1ST,            RKR,  RKRF; \
	vpsubq		RKRF,            R32,  RKRR; \
	vpsrldq $1,	RKR,             RKR;        \
	subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
	subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

#define enc_preload_rkr() \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX),                  RKR, RKR;

#define dec_preload_rkr() \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		kr(CTX),                  RKR, RKR; \
	vpshufb		.Lbswap128_mask,          RKR, RKR;

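/*
 * Decryption consumes the rotation keys in reverse order, so
 * dec_preload_rkr additionally reverses the 16 Kr bytes; the per-round
 * vpsrldq $1 in round() then walks them from Kr[15] down to Kr[0].
 */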
#define transpose_2x4(x0, x1, t0, t1) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t1; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
	vpshufb rmask, 	x0,	x0; \
	vpshufb rmask, 	x1,	x1; \
	\
	transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
	transpose_2x4(x0, x1, t0, t1) \
	\
	vpshufb rmask,	x0, x0;           \
	vpshufb rmask,	x1, x1;

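/*
 * inpack_blocks byteswaps the big-endian input words and transposes a
 * pair of registers holding four blocks between them, so that x0 ends up
 * with the four left halves and x1 with the four right halves;
 * outunpack_blocks reverses this on the way out.
 */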
.section	.rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.section	.rodata.cst16.bswap128_mask, "aM", @progbits, 16
.align 16
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.section	.rodata.cst16.bswap_iv_mask, "aM", @progbits, 16
.align 16
.Lbswap_iv_mask:
	.byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0

.section	.rodata.cst4.16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16
.section	.rodata.cst4.32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0
.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

.align 16
SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
	/* input:
	 *	%rdi: ctx
	 *	RL1: blocks 1 and 2
	 *	RR1: blocks 3 and 4
	 *	RL2: blocks 5 and 6
	 *	RR2: blocks 7 and 8
	 *	RL3: blocks 9 and 10
	 *	RR3: blocks 11 and 12
	 *	RL4: blocks 13 and 14
	 *	RR4: blocks 15 and 16
	 * output:
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	enc_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

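	/*
	 * The rounds alternate which register set plays the Feistel "left"
	 * half, so no explicit swap is needed; the round function type
	 * cycles 1, 2, 3 as specified by RFC 2144.
	 */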
	round(RL, RR, 0, 1);
	round(RR, RL, 1, 2);
	round(RL, RR, 2, 3);
	round(RR, RL, 3, 1);
	round(RL, RR, 4, 2);
	round(RR, RL, 5, 3);
	round(RL, RR, 6, 1);
	round(RR, RL, 7, 2);
	round(RL, RR, 8, 3);
	round(RR, RL, 9, 1);
	round(RL, RR, 10, 2);
	round(RR, RL, 11, 3);

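	/* ctx->rr is nonzero for keys of 80 bits or less, which use the
	 * 12-round variant: skip the last four rounds. */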
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_enc;

	round(RL, RR, 12, 1);
	round(RR, RL, 13, 2);
	round(RL, RR, 14, 3);
	round(RR, RL, 15, 1);

.L__skip_enc:
	popq %rbx;
	popq %r15;

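	/* The final Feistel swap is folded into the output: the halves are
	 * unpacked and stored in (RR, RL) order. */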
	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	RET;
SYM_FUNC_END(__cast5_enc_blk16)

.align 16
SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
	/* input:
	 *	%rdi: ctx
	 *	RL1: encrypted blocks 1 and 2
	 *	RR1: encrypted blocks 3 and 4
	 *	RL2: encrypted blocks 5 and 6
	 *	RR2: encrypted blocks 7 and 8
	 *	RL3: encrypted blocks 9 and 10
	 *	RR3: encrypted blocks 11 and 12
	 *	RL4: encrypted blocks 13 and 14
	 *	RR4: encrypted blocks 15 and 16
	 * output:
	 *	RL1: decrypted blocks 1 and 2
	 *	RR1: decrypted blocks 3 and 4
	 *	RL2: decrypted blocks 5 and 6
	 *	RR2: decrypted blocks 7 and 8
	 *	RL3: decrypted blocks 9 and 10
	 *	RR3: decrypted blocks 11 and 12
	 *	RL4: decrypted blocks 13 and 14
	 *	RR4: decrypted blocks 15 and 16
	 */

	pushq %r15;
	pushq %rbx;

	movq %rdi, CTX;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;
	dec_preload_rkr();

	inpack_blocks(RL1, RR1, RTMP, RX, RKM);
	inpack_blocks(RL2, RR2, RTMP, RX, RKM);
	inpack_blocks(RL3, RR3, RTMP, RX, RKM);
	inpack_blocks(RL4, RR4, RTMP, RX, RKM);

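	/* For the 12-round variant, rounds 15..12 were never applied during
	 * encryption, so skip them here too; .L__skip_dec also discards the
	 * four matching rotation keys. */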
	movzbl rr(CTX), %eax;
	testl %eax, %eax;
	jnz .L__skip_dec;

	round(RL, RR, 15, 1);
	round(RR, RL, 14, 3);
	round(RL, RR, 13, 2);
	round(RR, RL, 12, 1);

.L__dec_tail:
	round(RL, RR, 11, 3);
	round(RR, RL, 10, 2);
	round(RL, RR, 9, 1);
	round(RR, RL, 8, 3);
	round(RL, RR, 7, 2);
	round(RR, RL, 6, 1);
	round(RL, RR, 5, 3);
	round(RR, RL, 4, 2);
	round(RL, RR, 3, 1);
	round(RR, RL, 2, 3);
	round(RL, RR, 1, 2);
	round(RR, RL, 0, 1);

	vmovdqa .Lbswap_mask, RKM;
	popq %rbx;
	popq %r15;

	outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
	outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
	outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

	RET;

.L__skip_dec:
	/* Drop the four reversed rotation keys Kr[15..12] so that
	 * .L__dec_tail starts with Kr[11]. */
	vpsrldq $4, RKR, RKR;
	jmp .L__dec_tail;
SYM_FUNC_END(__cast5_dec_blk16)

SYM_FUNC_START(cast5_ecb_enc_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_enc_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	popq %r15;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_ecb_enc_16way)

SYM_FUNC_START(cast5_ecb_dec_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */

	FRAME_BEGIN
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;

	vmovdqu (0*4*4)(%rdx), RL1;
	vmovdqu (1*4*4)(%rdx), RR1;
	vmovdqu (2*4*4)(%rdx), RL2;
	vmovdqu (3*4*4)(%rdx), RR2;
	vmovdqu (4*4*4)(%rdx), RL3;
	vmovdqu (5*4*4)(%rdx), RR3;
	vmovdqu (6*4*4)(%rdx), RL4;
	vmovdqu (7*4*4)(%rdx), RR4;

	call __cast5_dec_blk16;

	vmovdqu RR1, (0*4*4)(%r11);
	vmovdqu RL1, (1*4*4)(%r11);
	vmovdqu RR2, (2*4*4)(%r11);
	vmovdqu RL2, (3*4*4)(%r11);
	vmovdqu RR3, (4*4*4)(%r11);
	vmovdqu RL3, (5*4*4)(%r11);
	vmovdqu RR4, (6*4*4)(%r11);
	vmovdqu RL4, (7*4*4)(%r11);

	popq %r15;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_ecb_dec_16way)

SYM_FUNC_START(cast5_cbc_dec_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	vmovdqu (0*16)(%rdx), RL1;
	vmovdqu (1*16)(%rdx), RR1;
	vmovdqu (2*16)(%rdx), RL2;
	vmovdqu (3*16)(%rdx), RR2;
	vmovdqu (4*16)(%rdx), RL3;
	vmovdqu (5*16)(%rdx), RR3;
	vmovdqu (6*16)(%rdx), RL4;
	vmovdqu (7*16)(%rdx), RR4;

	call __cast5_dec_blk16;

	/* CBC chaining: xor each decrypted block with the preceding
	 * ciphertext block.  The first block is xored with zero here and
	 * is left for the caller to combine with the IV (or the ciphertext
	 * block preceding this batch). */
	vmovq (%r12), RX;
	vpshufd $0x4f, RX, RX; /* ciphertext block 1 -> high lane, zero -> low */
	vpxor RX, RR1, RR1;
	vpxor 0*16+8(%r12), RL1, RL1;
	vpxor 1*16+8(%r12), RR2, RR2;
	vpxor 2*16+8(%r12), RL2, RL2;
	vpxor 3*16+8(%r12), RR3, RR3;
	vpxor 4*16+8(%r12), RL3, RL3;
	vpxor 5*16+8(%r12), RR4, RR4;
	vpxor 6*16+8(%r12), RL4, RL4;

	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r15;
	popq %r12;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_cbc_dec_16way)

SYM_FUNC_START(cast5_ctr_16way)
	/* input:
	 *	%rdi: ctx
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (big endian, 64bit)
	 */
	FRAME_BEGIN
	pushq %r12;
	pushq %r15;

	movq %rdi, CTX;
	movq %rsi, %r11;
	movq %rdx, %r12;

	vpcmpeqd RTMP, RTMP, RTMP;
	vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

	vpcmpeqd RKR, RKR, RKR;
	vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
	vmovdqa .Lbswap_iv_mask, R1ST;
	vmovdqa .Lbswap128_mask, RKM;

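	/*
	 * Counter construction: the 64-bit IV is duplicated into both
	 * lanes, then each vpsubq with the all(-2) vector advances both
	 * lanes by two, yielding consecutive big-endian counter pairs
	 * after the byteswap.
	 */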
	/* load IV and byteswap */
	vmovq (%rcx), RX;
	vpshufb R1ST, RX, RX;

	/* construct IVs */
	vpsubq RTMP, RX, RX;  /* le: IV1, IV0 */
	vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
	vpsubq RKR, RX, RX;
	vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

	/* store last IV */
	vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
	vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
	vmovq RX, (%rcx);

	call __cast5_enc_blk16;

	/* dst = src ^ keystream (the encrypted counters) */
	vpxor (0*16)(%r12), RR1, RR1;
	vpxor (1*16)(%r12), RL1, RL1;
	vpxor (2*16)(%r12), RR2, RR2;
	vpxor (3*16)(%r12), RL2, RL2;
	vpxor (4*16)(%r12), RR3, RR3;
	vpxor (5*16)(%r12), RL3, RL3;
	vpxor (6*16)(%r12), RR4, RR4;
	vpxor (7*16)(%r12), RL4, RL4;
	vmovdqu RR1, (0*16)(%r11);
	vmovdqu RL1, (1*16)(%r11);
	vmovdqu RR2, (2*16)(%r11);
	vmovdqu RL2, (3*16)(%r11);
	vmovdqu RR3, (4*16)(%r11);
	vmovdqu RL3, (5*16)(%r11);
	vmovdqu RR4, (6*16)(%r11);
	vmovdqu RL4, (7*16)(%r11);

	popq %r15;
	popq %r12;
	FRAME_END
	RET;
SYM_FUNC_END(cast5_ctr_16way)