//
// Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
// Copyright (C) 2019 Google LLC <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//

// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
//     Erdinc Ozturk <erdinc.ozturk@intel.com>
//     Vinodh Gopal <vinodh.gopal@intel.com>
//     James Guilford <james.guilford@intel.com>
//     Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses.  You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the
//   distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//       Reference paper titled "Fast CRC Computation for Generic
//       Polynomials Using PCLMULQDQ Instruction"
//       URL: http://www.intel.com/content/dam/www/public/us/en/documents
//       /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//

#include <linux/linkage.h>
#include <asm/assembler.h>

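// CPU_LE() emits its argument only on little-endian kernels; big-endian
// (CONFIG_CPU_ENDIAN_BE8) builds omit the byte reversals guarded by it.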
#ifdef CONFIG_CPU_ENDIAN_BE8
#define CPU_LE(code...)
#else
#define CPU_LE(code...)		code
#endif

	.text
	.arch		armv8-a
	.fpu		crypto-neon-fp-armv8

	init_crc	.req	r0
	buf		.req	r1
	len		.req	r2

	fold_consts_ptr	.req	ip

	q0l		.req	d0
	q0h		.req	d1
	q1l		.req	d2
	q1h		.req	d3
	q2l		.req	d4
	q2h		.req	d5
	q3l		.req	d6
	q3h		.req	d7
	q4l		.req	d8
	q4h		.req	d9
	q5l		.req	d10
	q5h		.req	d11
	q6l		.req	d12
	q6h		.req	d13
	q7l		.req	d14
	q7h		.req	d15
	q8l		.req	d16
	q8h		.req	d17
	q9l		.req	d18
	q9h		.req	d19
	q10l		.req	d20
	q10h		.req	d21
	q11l		.req	d22
	q11h		.req	d23
	q12l		.req	d24
	q12h		.req	d25

	FOLD_CONSTS	.req	q10
	FOLD_CONST_L	.req	q10l
	FOLD_CONST_H	.req	q10h

	// Fold reg1, reg2 into the next 32 data bytes, storing the result back
	// into reg1, reg2.
	.macro		fold_32_bytes, reg1, reg2
	vld1.64		{q11-q12}, [buf]!

	vmull.p64	q8, \reg1\()h, FOLD_CONST_H
	vmull.p64	\reg1, \reg1\()l, FOLD_CONST_L
	vmull.p64	q9, \reg2\()h, FOLD_CONST_H
	vmull.p64	\reg2, \reg2\()l, FOLD_CONST_L

CPU_LE(	vrev64.8	q11, q11	)
CPU_LE(	vrev64.8	q12, q12	)
	vswp		q11l, q11h
	vswp		q12l, q12h

	veor.8		\reg1, \reg1, q8
	veor.8		\reg2, \reg2, q9
	veor.8		\reg1, \reg1, q11
	veor.8		\reg2, \reg2, q12
	.endm
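
	// With the fold-across-128-bytes constants loaded, fold_32_bytes
	// computes, for each of \reg1 and \reg2:
	//
	//	acc' = acc_lo * (x^1024 mod G(x)) xor
	//	       acc_hi * (x^1088 mod G(x)) xor data
	//
	// which is congruent mod G(x) to acc * x^1024 xor data, i.e. each
	// accumulator is carried forward across 128 bytes (1024 bits) of the
	// message using two 64x64 -> 128 bit carryless multiplies.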

	// Fold src_reg into dst_reg, optionally loading the next fold constants
	.macro		fold_16_bytes, src_reg, dst_reg, load_next_consts
	vmull.p64	q8, \src_reg\()l, FOLD_CONST_L
	vmull.p64	\src_reg, \src_reg\()h, FOLD_CONST_H
	.ifnb		\load_next_consts
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!
	.endif
	veor.8		\dst_reg, \dst_reg, q8
	veor.8		\dst_reg, \dst_reg, \src_reg
	.endm
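
	// Same identity as fold_32_bytes: the result is congruent mod G(x) to
	// src_reg * x^N xor dst_reg, where N is the distance encoded in the
	// constants currently held in FOLD_CONSTS (512, 256 or 128 bits for
	// the fold-across-64/32/16-byte stages below).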

	.macro		__adrl, out, sym
	movw		\out, #:lower16:\sym
	movt		\out, #:upper16:\sym
	.endm
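
	// The __adrl macro above materializes the 32-bit absolute address of
	// its symbol with a movw/movt pair rather than a literal-pool load.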

//
// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
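// For reference, the value computed here matches a plain bit-at-a-time
// CRC-T10DIF (polynomial 0x8bb7, MSB-first, no reflection, no final XOR)
// seeded with init_crc.  A minimal C sketch of that definition, for
// illustration only (the function name is made up; this is not the kernel's
// table-driven generic implementation):
//
//	u16 crc_t10dif_bitwise(u16 crc, const u8 *buf, size_t len)
//	{
//		while (len--) {
//			int i;
//
//			crc ^= (u16)*buf++ << 8;
//			for (i = 0; i < 8; i++)
//				crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
//						     : crc << 1;
//		}
//		return crc;
//	}
//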
ENTRY(crc_t10dif_pmull)

	// For sizes less than 256 bytes, we can't fold 128 bytes at a time.
	cmp		len, #256
	blt		.Lless_than_256_bytes

	__adrl		fold_consts_ptr, .Lfold_across_128_bytes_consts

	// Load the first 128 data bytes.  Byte swapping is necessary to make
	// the bit order match the polynomial coefficient order.
	vld1.64		{q0-q1}, [buf]!
	vld1.64		{q2-q3}, [buf]!
	vld1.64		{q4-q5}, [buf]!
	vld1.64		{q6-q7}, [buf]!
CPU_LE(	vrev64.8	q0, q0	)
CPU_LE(	vrev64.8	q1, q1	)
CPU_LE(	vrev64.8	q2, q2	)
CPU_LE(	vrev64.8	q3, q3	)
CPU_LE(	vrev64.8	q4, q4	)
CPU_LE(	vrev64.8	q5, q5	)
CPU_LE(	vrev64.8	q6, q6	)
CPU_LE(	vrev64.8	q7, q7	)
	vswp		q0l, q0h
	vswp		q1l, q1h
	vswp		q2l, q2h
	vswp		q3l, q3h
	vswp		q4l, q4h
	vswp		q5l, q5h
	vswp		q6l, q6h
	vswp		q7l, q7h

	// XOR the first 16 data *bits* with the initial CRC value.
	vmov.i8		q8h, #0
	vmov.u16	q8h[3], init_crc
	veor		q0h, q0h, q8h

	// Load the constants for folding across 128 bytes.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!

	// Subtract 128 for the 128 data bytes just consumed.  Subtract another
	// 128 to simplify the termination condition of the following loop.
	sub		len, len, #256

	// While >= 128 data bytes remain (not counting q0-q7), fold the 128
	// bytes q0-q7 into them, storing the result back into q0-q7.
.Lfold_128_bytes_loop:
	fold_32_bytes	q0, q1
	fold_32_bytes	q2, q3
	fold_32_bytes	q4, q5
	fold_32_bytes	q6, q7
	subs		len, len, #128
	bge		.Lfold_128_bytes_loop

	// Now fold the 112 bytes in q0-q6 into the 16 bytes in q7.

	// Fold across 64 bytes.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!
	fold_16_bytes	q0, q4
	fold_16_bytes	q1, q5
	fold_16_bytes	q2, q6
	fold_16_bytes	q3, q7, 1
	// Fold across 32 bytes.
	fold_16_bytes	q4, q6
	fold_16_bytes	q5, q7, 1
	// Fold across 16 bytes.
	fold_16_bytes	q6, q7

	// Add 128 to get the correct number of data bytes remaining in 0...127
	// (not counting q7), following the previous extra subtraction by 128.
	// Then subtract 16 to simplify the termination condition of the
	// following loop.
	adds		len, len, #(128-16)

	// While >= 16 data bytes remain (not counting q7), fold the 16 bytes q7
	// into them, storing the result back into q7.
	blt		.Lfold_16_bytes_loop_done
.Lfold_16_bytes_loop:
	vmull.p64	q8, q7l, FOLD_CONST_L
	vmull.p64	q7, q7h, FOLD_CONST_H
	veor.8		q7, q7, q8
	vld1.64		{q0}, [buf]!
CPU_LE(	vrev64.8	q0, q0	)
	vswp		q0l, q0h
	veor.8		q7, q7, q0
	subs		len, len, #16
	bge		.Lfold_16_bytes_loop

.Lfold_16_bytes_loop_done:
	// Add 16 to get the correct number of data bytes remaining in 0...15
	// (not counting q7), following the previous extra subtraction by 16.
	adds		len, len, #16
	beq		.Lreduce_final_16_bytes

.Lhandle_partial_segment:
	// Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
	// 16 bytes are in q7 and the rest are the remaining data in 'buf'.  To
	// do this without needing a fold constant for each possible 'len',
	// redivide the bytes into a first chunk of 'len' bytes and a second
	// chunk of 16 bytes, then fold the first chunk into the second.
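	// For example, with len = 3 the trailing 19 bytes are re-split into a
	// first chunk holding the 3 highest-order bytes of q7 and a second
	// chunk holding the remaining 13 bytes of q7 followed by the 3 bytes
	// left in 'buf'.  The first chunk then sits exactly 16 bytes (128
	// bits) above the second, so it can be folded in with the same
	// fold-across-16-bytes constants already in FOLD_CONSTS.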

	// q0 = last 16 original data bytes
	add		buf, buf, len
	sub		buf, buf, #16
	vld1.64		{q0}, [buf]
CPU_LE(	vrev64.8	q0, q0	)
	vswp		q0l, q0h

	// q1 = high order part of second chunk: q7 left-shifted by 'len' bytes.
	__adrl		r3, .Lbyteshift_table + 16
	sub		r3, r3, len
	vld1.8		{q2}, [r3]
	vtbl.8		q1l, {q7l-q7h}, q2l
	vtbl.8		q1h, {q7l-q7h}, q2h

	// q3 = first chunk: q7 right-shifted by '16-len' bytes.
	vmov.i8		q3, #0x80
	veor.8		q2, q2, q3
	vtbl.8		q3l, {q7l-q7h}, q2l
	vtbl.8		q3h, {q7l-q7h}, q2h

	// Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
	vshr.s8		q2, q2, #7

	// q2 = second chunk: 'len' bytes from q0 (low-order bytes),
	// then '16-len' bytes from q1 (high-order bytes).
	vbsl.8		q2, q1, q0

	// Fold the first chunk into the second chunk, storing the result in q7.
	vmull.p64	q0, q3l, FOLD_CONST_L
	vmull.p64	q7, q3h, FOLD_CONST_H
	veor.8		q7, q7, q0
	veor.8		q7, q7, q2

.Lreduce_final_16_bytes:
	// Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC.

	// Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!

	// Fold the high 64 bits into the low 64 bits, while also multiplying by
	// x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
	// whose low 48 bits are 0.
	vmull.p64	q0, q7h, FOLD_CONST_H	// high bits * x^48 * (x^80 mod G(x))
	veor.8		q0h, q0h, q7l		// + low bits * x^64

	// Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
	// value congruent to x^64 * M(x) and whose low 48 bits are 0.
	vmov.i8		q1, #0
	vmov		s4, s3			// extract high 32 bits
	vmov		s3, s5			// zero high 32 bits
	vmull.p64	q1, q1l, FOLD_CONST_L	// high 32 bits * x^48 * (x^48 mod G(x))
	veor.8		q0, q0, q1		// + low bits

	// Load G(x) and floor(x^48 / G(x)).
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]

	// Use Barrett reduction to compute the final CRC value.
	vmull.p64	q1, q0h, FOLD_CONST_H	// high 32 bits * floor(x^48 / G(x))
	vshr.u64	q1l, q1l, #32		// /= x^32
	vmull.p64	q1, q1l, FOLD_CONST_L	// *= G(x)
	vshr.u64	q0l, q0l, #48
	veor.8		q0l, q0l, q1l		// + low 16 nonzero bits
	// Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0.

	vmov.u16	r0, q0l[0]
	bx		lr

.Lless_than_256_bytes:
	// Checksumming a buffer of length 16...255 bytes

	__adrl		fold_consts_ptr, .Lfold_across_16_bytes_consts

	// Load the first 16 data bytes.
	vld1.64		{q7}, [buf]!
CPU_LE(	vrev64.8	q7, q7	)
	vswp		q7l, q7h

	// XOR the first 16 data *bits* with the initial CRC value.
	vmov.i8		q0h, #0
	vmov.u16	q0h[3], init_crc
	veor.8		q7h, q7h, q0h

	// Load the fold-across-16-bytes constants.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!

	cmp		len, #16
	beq		.Lreduce_final_16_bytes		// len == 16
	subs		len, len, #32
	addlt		len, len, #16
	blt		.Lhandle_partial_segment	// 17 <= len <= 31
	b		.Lfold_16_bytes_loop		// 32 <= len <= 255
ENDPROC(crc_t10dif_pmull)

	.section	".rodata", "a"
	.align		4

// Fold constants precomputed from the polynomial 0x18bb7
// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
.Lfold_across_128_bytes_consts:
	.quad		0x0000000000006123	// x^(8*128)	mod G(x)
	.quad		0x0000000000002295	// x^(8*128+64)	mod G(x)
// .Lfold_across_64_bytes_consts:
	.quad		0x0000000000001069	// x^(4*128)	mod G(x)
	.quad		0x000000000000dd31	// x^(4*128+64)	mod G(x)
// .Lfold_across_32_bytes_consts:
	.quad		0x000000000000857d	// x^(2*128)	mod G(x)
	.quad		0x0000000000007acc	// x^(2*128+64)	mod G(x)
.Lfold_across_16_bytes_consts:
	.quad		0x000000000000a010	// x^(1*128)	mod G(x)
	.quad		0x0000000000001faa	// x^(1*128+64)	mod G(x)
// .Lfinal_fold_consts:
	.quad		0x1368000000000000	// x^48 * (x^48 mod G(x))
	.quad		0x2d56000000000000	// x^48 * (x^80 mod G(x))
// .Lbarrett_reduction_consts:
	.quad		0x0000000000018bb7	// G(x)
	.quad		0x00000001f65a57f8	// floor(x^48 / G(x))
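
// The x^N mod G(x) constants above can be regenerated with straightforward
// GF(2) polynomial arithmetic.  A minimal sketch, for illustration only (the
// function name is made up and this is not part of the build):
//
//	/* x^n mod G(x) for G(x) = 0x18bb7, returned as a 16-bit value */
//	static u32 xn_mod_g(unsigned int n)
//	{
//		u32 r = 1;			/* r = x^0 */
//
//		while (n--) {
//			r <<= 1;		/* r *= x */
//			if (r & (1 << 16))
//				r ^= 0x18bb7;	/* reduce modulo G(x) */
//		}
//		return r;
//	}
//
// e.g. xn_mod_g(8*128) should reproduce the first constant above, and
// floor(x^48 / G(x)) is the quotient of the corresponding GF(2) division.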

// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
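//
// For example, with len = 2 the vector at &byteshift_table[14] is
// {0x8e, 0x8f, 0x0, 0x1, ..., 0xd}: the two out-of-range indices make vtbl
// write zeroes while the remaining entries select bytes 0-13 of q7 (a 2-byte
// shift), and the same vector XORed with 0x80 selects only bytes 14-15 of q7
// (the complementary 14-byte shift).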
.Lbyteshift_table:
	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0