/*
 * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
 *
 * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/* GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please  visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * GPL HEADER END
 */

/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Using the hardware-provided PCLMULQDQ instruction to accelerate the CRC32
 * calculation.
 * CRC32 polynomial: 0x04c11db7 (BE) / 0xEDB88320 (LE)
 * PCLMULQDQ is an instruction introduced with Intel SSE4.2; the reference can
 * be found at:
 * https://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors:   Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *	      Alexander Boyko <Alexander_Boyko@xyratex.com>
 */
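
/*
 * On ARM, the 64x64 -> 128 bit carry-less multiplications used for the
 * folding steps below are performed with the VMULL.P64 instruction from the
 * ARMv8 Crypto Extensions (the AArch32 counterpart of PCLMULQDQ), while the
 * crc32_armv8_le/crc32c_armv8_le routines at the end of the file use the
 * ARMv8 CRC32/CRC32C instructions directly.
 */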

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.align		6
	.arch		armv8-a
	.arch_extension	crc
	.fpu		crypto-neon-fp-armv8

.Lcrc32_constants:
	/*
	 * [(x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
	 * #define CONSTANT_R1  0x154442bd4LL
	 *
	 * [(x4*128-32 mod P(x) << 32)]' << 1   = 0x1c6e41596
	 * #define CONSTANT_R2  0x1c6e41596LL
	 */
	.quad		0x0000000154442bd4
	.quad		0x00000001c6e41596

	/*
	 * [(x128+32 mod P(x) << 32)]'   << 1   = 0x1751997d0
	 * #define CONSTANT_R3  0x1751997d0LL
	 *
	 * [(x128-32 mod P(x) << 32)]'   << 1   = 0x0ccaa009e
	 * #define CONSTANT_R4  0x0ccaa009eLL
	 */
	.quad		0x00000001751997d0
	.quad		0x00000000ccaa009e

	/*
	 * [(x64 mod P(x) << 32)]'       << 1   = 0x163cd6124
	 * #define CONSTANT_R5  0x163cd6124LL
	 */
	.quad		0x0000000163cd6124
	.quad		0x00000000FFFFFFFF

	/*
	 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
	 *
	 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
	 *                                                      = 0x1F7011641LL
	 * #define CONSTANT_RU  0x1F7011641LL
	 */
	.quad		0x00000001DB710641
	.quad		0x00000001F7011641

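	/*
	 * The constants below are the corresponding fold and Barrett
	 * reduction values for CRC-32C, which uses the Castagnoli
	 * polynomial 0x1EDC6F41 (bit-reflected 0x82F63B78). They are laid
	 * out in the same order as the CRC32 constants above, so the code
	 * below can index either table through r3.
	 */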
.Lcrc32c_constants:
	.quad		0x00000000740eef02
	.quad		0x000000009e4addf8
	.quad		0x00000000f20c0dfe
	.quad		0x000000014cd00bd6
	.quad		0x00000000dd45aab8
	.quad		0x00000000FFFFFFFF
	.quad		0x0000000105ec76f0
	.quad		0x00000000dea713f1

	dCONSTANTl	.req	d0
	dCONSTANTh	.req	d1
	qCONSTANT	.req	q0

	BUF		.req	r0
	LEN		.req	r1
	CRC		.req	r2

	qzr		.req	q9

	/**
	 * Calculate crc32
	 * BUF - buffer
	 * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
	 * CRC - initial crc32
	 * return crc32 result in r0
	 * uint crc32_pmull_le(unsigned char const *buffer,
	 *                     size_t len, uint crc32)
	 */
ENTRY(crc32_pmull_le)
	adr		r3, .Lcrc32_constants
	b		0f

ENTRY(crc32c_pmull_le)
	adr		r3, .Lcrc32c_constants

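	/*
	 * Common entry: round LEN down to a multiple of 16, preload the
	 * first 64 bytes into q1-q4, and XOR the initial CRC value into
	 * the low 32 bits of the first vector.
	 */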
0:	bic		LEN, LEN, #15
	vld1.8		{q1-q2}, [BUF, :128]!
	vld1.8		{q3-q4}, [BUF, :128]!
	vmov.i8		qzr, #0
	vmov.i8		qCONSTANT, #0
	vmov.32		dCONSTANTl[0], CRC
	veor.8		d2, d2, dCONSTANTl
	sub		LEN, LEN, #0x40
	cmp		LEN, #0x40
	blt		less_64

	vld1.64		{qCONSTANT}, [r3]

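	/*
	 * Each pass folds the four 128-bit accumulators in q1-q4 across the
	 * next 64 bytes of input: the low half of each vector is carry-less
	 * multiplied by R1 and the high half by R2, and the two products are
	 * XORed together and with the freshly loaded data.
	 */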
loop_64:		/* 64 bytes Full cache line folding */
	sub		LEN, LEN, #0x40

	vmull.p64	q5, d3, dCONSTANTh
	vmull.p64	q6, d5, dCONSTANTh
	vmull.p64	q7, d7, dCONSTANTh
	vmull.p64	q8, d9, dCONSTANTh

	vmull.p64	q1, d2, dCONSTANTl
	vmull.p64	q2, d4, dCONSTANTl
	vmull.p64	q3, d6, dCONSTANTl
	vmull.p64	q4, d8, dCONSTANTl

	veor.8		q1, q1, q5
	vld1.8		{q5}, [BUF, :128]!
	veor.8		q2, q2, q6
	vld1.8		{q6}, [BUF, :128]!
	veor.8		q3, q3, q7
	vld1.8		{q7}, [BUF, :128]!
	veor.8		q4, q4, q8
	vld1.8		{q8}, [BUF, :128]!

	veor.8		q1, q1, q5
	veor.8		q2, q2, q6
	veor.8		q3, q3, q7
	veor.8		q4, q4, q8

	cmp		LEN, #0x40
	bge		loop_64

less_64:		/* Folding cache line into 128bit */
	vldr		dCONSTANTl, [r3, #16]
	vldr		dCONSTANTh, [r3, #24]
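	/*
	 * qCONSTANT now holds the R3/R4 pair; the three identical steps
	 * below fold q2, q3 and q4 into q1 one at a time, leaving a single
	 * 128-bit remainder in q1.
	 */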

	vmull.p64	q5, d3, dCONSTANTh
	vmull.p64	q1, d2, dCONSTANTl
	veor.8		q1, q1, q5
	veor.8		q1, q1, q2

	vmull.p64	q5, d3, dCONSTANTh
	vmull.p64	q1, d2, dCONSTANTl
	veor.8		q1, q1, q5
	veor.8		q1, q1, q3

	vmull.p64	q5, d3, dCONSTANTh
	vmull.p64	q1, d2, dCONSTANTl
	veor.8		q1, q1, q5
	veor.8		q1, q1, q4

	teq		LEN, #0
	beq		fold_64

loop_16:		/* Folding rest buffer into 128bit */
	subs		LEN, LEN, #0x10

	vld1.8		{q2}, [BUF, :128]!
	vmull.p64	q5, d3, dCONSTANTh
	vmull.p64	q1, d2, dCONSTANTl
	veor.8		q1, q1, q5
	veor.8		q1, q1, q2

	bne		loop_16

fold_64:
	/* perform the last 64 bit fold, also adding 32 zeroes
	 * to the input stream */
	vmull.p64	q2, d2, dCONSTANTh
	vext.8		q1, q1, qzr, #8
	veor.8		q1, q1, q2

	/* final 32-bit fold */
	vldr		dCONSTANTl, [r3, #32]
	vldr		d6, [r3, #40]
	vmov.i8		d7, #0

	vext.8		q2, q1, qzr, #4
	vand.8		d2, d2, d6
	vmull.p64	q1, d2, dCONSTANTl
	veor.8		q1, q1, q2

	/* Finish up with the bit-reversed Barrett reduction 64 ==> 32 bits */
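	/*
	 * I.e. multiply the low 32 bits of the remainder by RU (the
	 * bit-reflected quotient x**64 / P(x) defined above), keep the low
	 * 32 bits of that product, multiply them by the polynomial constant,
	 * and XOR the result back into the remainder; the 32-bit CRC then
	 * sits in bits [63:32] of q1 (s5), which is what gets returned in r0.
	 */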
	vldr		dCONSTANTl, [r3, #48]
	vldr		dCONSTANTh, [r3, #56]

	vand.8		q2, q1, q3
	vext.8		q2, qzr, q2, #8
	vmull.p64	q2, d5, dCONSTANTh
	vand.8		q2, q2, q3
	vmull.p64	q2, d4, dCONSTANTl
	veor.8		q1, q1, q2
	vmov		r0, s5

	bx		lr
ENDPROC(crc32_pmull_le)
ENDPROC(crc32c_pmull_le)

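/*
 * Scalar CRC32 using the ARMv8 CRC32 instructions: the __crc32 macro below
 * expands into either the CRC32 or the CRC32C flavour (crc32w/crc32h/crc32b
 * vs. crc32cw/crc32ch/crc32cb). It takes the initial CRC in r0, the buffer
 * in r1 and the length in r2, consumes the buffer eight bytes at a time once
 * the pointer is word aligned, and handles any unaligned head and sub-word
 * tail bytes individually, returning the result in r0.
 */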
	.macro		__crc32, c
	subs		ip, r2, #8
	bmi		.Ltail\c

	tst		r1, #3
	bne		.Lunaligned\c

	teq		ip, #0
.Laligned8\c:
	ldrd		r2, r3, [r1], #8
ARM_BE8(rev		r2, r2		)
ARM_BE8(rev		r3, r3		)
	crc32\c\()w	r0, r0, r2
	crc32\c\()w	r0, r0, r3
	bxeq		lr
	subs		ip, ip, #8
	bpl		.Laligned8\c

.Ltail\c:
	tst		ip, #4
	beq		2f
	ldr		r3, [r1], #4
ARM_BE8(rev		r3, r3		)
	crc32\c\()w	r0, r0, r3

2:	tst		ip, #2
	beq		1f
	ldrh		r3, [r1], #2
ARM_BE8(rev16		r3, r3		)
	crc32\c\()h	r0, r0, r3

1:	tst		ip, #1
	bxeq		lr
	ldrb		r3, [r1]
	crc32\c\()b	r0, r0, r3
	bx		lr

.Lunaligned\c:
	tst		r1, #1
	beq		2f
	ldrb		r3, [r1], #1
	subs		r2, r2, #1
	crc32\c\()b	r0, r0, r3

	tst		r1, #2
	beq		0f
2:	ldrh		r3, [r1], #2
	subs		r2, r2, #2
ARM_BE8(rev16		r3, r3		)
	crc32\c\()h	r0, r0, r3

0:	subs		ip, r2, #8
	bpl		.Laligned8\c
	b		.Ltail\c
	.endm

	.align		5
ENTRY(crc32_armv8_le)
	__crc32
ENDPROC(crc32_armv8_le)

	.align		5
ENTRY(crc32c_armv8_le)
	__crc32		c
ENDPROC(crc32c_armv8_le)