########################################################################
# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
#
# Copyright (c) 2013, Intel Corporation
#
# Authors:
#     Erdinc Ozturk <erdinc.ozturk@intel.com>
#     Vinodh Gopal <vinodh.gopal@intel.com>
#     James Guilford <james.guilford@intel.com>
#     Tim Chen <tim.c.chen@linux.intel.com>
#
# This software is available to you under a choice of one of two
# licenses.  You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the
#   distribution.
#
# * Neither the name of the Intel Corporation nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#       Reference paper titled "Fast CRC Computation for Generic
#       Polynomials Using PCLMULQDQ Instruction"
#       URL: http://www.intel.com/content/dam/www/public/us/en/documents
#       /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
#

#include <linux/linkage.h>

.text

#define		init_crc	%edi
#define		buf		%rsi
#define		len		%rdx

#define		FOLD_CONSTS	%xmm10
#define		BSWAP_MASK	%xmm11

# Fold reg1, reg2 into the next 32 data bytes, storing the result back into
# reg1, reg2.
.macro	fold_32_bytes	offset, reg1, reg2
	movdqu	\offset(buf), %xmm9
	movdqu	\offset+16(buf), %xmm12
	pshufb	BSWAP_MASK, %xmm9
	pshufb	BSWAP_MASK, %xmm12
	movdqa	\reg1, %xmm8
	movdqa	\reg2, %xmm13
	pclmulqdq	$0x00, FOLD_CONSTS, \reg1
	pclmulqdq	$0x11, FOLD_CONSTS, %xmm8
	pclmulqdq	$0x00, FOLD_CONSTS, \reg2
	pclmulqdq	$0x11, FOLD_CONSTS, %xmm13
	pxor	%xmm9, \reg1
	xorps	%xmm8, \reg1
	pxor	%xmm12, \reg2
	xorps	%xmm13, \reg2
.endm
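
# Each fold step works on one 128-bit register X holding a slice of the
# running remainder.  With K0 = x^N mod G(x) in the low qword of FOLD_CONSTS
# and K1 = x^(N+64) mod G(x) in the high qword, the step computes
# clmul(X.lo, K0) ^ clmul(X.hi, K1) ^ D, where D is the next 16 byte-swapped
# data bytes and clmul is a carryless 64x64 -> 128-bit multiply.  A minimal C
# model of one step (a sketch; v128, clmul64 and fold_16 are hypothetical
# names, not kernel APIs):
#
#	struct v128 { u64 hi, lo; };
#
#	/* software model of one PCLMULQDQ: carryless 64x64 -> 128 bits */
#	static struct v128 clmul64(u64 a, u64 b)
#	{
#		struct v128 r = { 0, 0 };
#		for (int i = 0; i < 64; i++)
#			if ((b >> i) & 1) {
#				r.lo ^= a << i;
#				if (i)
#					r.hi ^= a >> (64 - i);
#			}
#		return r;
#	}
#
#	/* fold remainder slice x into the next 16 data bytes d */
#	static struct v128 fold_16(struct v128 x, struct v128 d, u64 k0, u64 k1)
#	{
#		struct v128 a = clmul64(x.lo, k0);	/* pclmulqdq $0x00 */
#		struct v128 b = clmul64(x.hi, k1);	/* pclmulqdq $0x11 */
#		d.hi ^= a.hi ^ b.hi;			/* XOR = GF(2) add */
#		d.lo ^= a.lo ^ b.lo;
#		return d;
#	}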

# Fold src_reg into dst_reg.
.macro	fold_16_bytes	src_reg, dst_reg
	movdqa	\src_reg, %xmm8
	pclmulqdq	$0x11, FOLD_CONSTS, \src_reg
	pclmulqdq	$0x00, FOLD_CONSTS, %xmm8
	pxor	%xmm8, \dst_reg
	xorps	\src_reg, \dst_reg
.endm

#
# u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
#
# Assumes len >= 16.
#
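# For reference, CRC-T10DIF is a plain MSB-first (non-reflected) 16-bit CRC
# with generator polynomial 0x8bb7 and a caller-supplied initial value.  A
# minimal bitwise C model the vectorized result can be checked against (a
# sketch with a hypothetical name; the kernel's table-driven fallback is
# crc_t10dif_generic() in crypto/crct10dif_common.c):
#
#	u16 crc_t10dif_bitwise(u16 crc, const u8 *buf, size_t len)
#	{
#		while (len--) {
#			crc ^= (u16)*buf++ << 8;
#			for (int i = 0; i < 8; i++)
#				crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
#						     : crc << 1;
#		}
#		return crc;
#	}
#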
.align 16
SYM_FUNC_START(crc_t10dif_pcl)

	movdqa	.Lbswap_mask(%rip), BSWAP_MASK

	# For sizes less than 256 bytes, we can't fold 128 bytes at a time.
	cmp	$256, len
	jl	.Lless_than_256_bytes

	# Load the first 128 data bytes.  Byte swapping is necessary to make the
	# bit order match the polynomial coefficient order.
	movdqu	16*0(buf), %xmm0
	movdqu	16*1(buf), %xmm1
	movdqu	16*2(buf), %xmm2
	movdqu	16*3(buf), %xmm3
	movdqu	16*4(buf), %xmm4
	movdqu	16*5(buf), %xmm5
	movdqu	16*6(buf), %xmm6
	movdqu	16*7(buf), %xmm7
	add	$128, buf
	pshufb	BSWAP_MASK, %xmm0
	pshufb	BSWAP_MASK, %xmm1
	pshufb	BSWAP_MASK, %xmm2
	pshufb	BSWAP_MASK, %xmm3
	pshufb	BSWAP_MASK, %xmm4
	pshufb	BSWAP_MASK, %xmm5
	pshufb	BSWAP_MASK, %xmm6
	pshufb	BSWAP_MASK, %xmm7

	# XOR the first 16 data *bits* with the initial CRC value.
	pxor	%xmm8, %xmm8
	pinsrw	$7, init_crc, %xmm8
	pxor	%xmm8, %xmm0
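	# (After the byte swap, word 7 of xmm0 holds the first two data bytes,
	# so this uses the standard MSB-first CRC identity: seeding the CRC
	# register is equivalent to XORing the seed into the first 16 message
	# bits and starting from zero.)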

	movdqa	.Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS

	# Subtract 128 for the 128 data bytes just consumed.  Subtract another
	# 128 to simplify the termination condition of the following loop.
	sub	$256, len

	# While >= 128 data bytes remain (not counting xmm0-7), fold the 128
	# bytes xmm0-7 into them, storing the result back into xmm0-7.
.Lfold_128_bytes_loop:
	fold_32_bytes	0, %xmm0, %xmm1
	fold_32_bytes	32, %xmm2, %xmm3
	fold_32_bytes	64, %xmm4, %xmm5
	fold_32_bytes	96, %xmm6, %xmm7
	add	$128, buf
	sub	$128, len
	jge	.Lfold_128_bytes_loop

	# Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7.

	# Fold across 64 bytes.
	movdqa	.Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS
	fold_16_bytes	%xmm0, %xmm4
	fold_16_bytes	%xmm1, %xmm5
	fold_16_bytes	%xmm2, %xmm6
	fold_16_bytes	%xmm3, %xmm7
	# Fold across 32 bytes.
	movdqa	.Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS
	fold_16_bytes	%xmm4, %xmm6
	fold_16_bytes	%xmm5, %xmm7
	# Fold across 16 bytes.
	movdqa	.Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
	fold_16_bytes	%xmm6, %xmm7

	# Add 128 to get the correct number of data bytes remaining in 0...127
	# (not counting xmm7), following the previous extra subtraction by 128.
	# Then subtract 16 to simplify the termination condition of the
	# following loop.
	add	$128-16, len

	# While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes
	# xmm7 into them, storing the result back into xmm7.
	jl	.Lfold_16_bytes_loop_done
.Lfold_16_bytes_loop:
	movdqa	%xmm7, %xmm8
	pclmulqdq	$0x11, FOLD_CONSTS, %xmm7
	pclmulqdq	$0x00, FOLD_CONSTS, %xmm8
	pxor	%xmm8, %xmm7
	movdqu	(buf), %xmm0
	pshufb	BSWAP_MASK, %xmm0
	pxor	%xmm0, %xmm7
	add	$16, buf
	sub	$16, len
	jge	.Lfold_16_bytes_loop

.Lfold_16_bytes_loop_done:
	# Add 16 to get the correct number of data bytes remaining in 0...15
	# (not counting xmm7), following the previous extra subtraction by 16.
	add	$16, len
	je	.Lreduce_final_16_bytes

.Lhandle_partial_segment:
	# Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first 16
	# bytes are in xmm7 and the rest are the remaining data in 'buf'.  To do
	# this without needing a fold constant for each possible 'len', redivide
	# the bytes into a first chunk of 'len' bytes and a second chunk of 16
	# bytes, then fold the first chunk into the second.
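	#
	# For example (a sketch, with len = 3): the 19 remaining bytes are the
	# 16 bytes in xmm7 followed by 3 data bytes.  The first chunk becomes
	# the 3 high-order bytes of xmm7; the second chunk becomes the other
	# 13 bytes of xmm7 followed by the 3 data bytes.  Folding the first
	# chunk across 16 bytes into the second leaves a single 16-byte value
	# to reduce, with no len-specific fold constants needed.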

	movdqa	%xmm7, %xmm2

	# xmm1 = last 16 original data bytes
	movdqu	-16(buf, len), %xmm1
	pshufb	BSWAP_MASK, %xmm1

	# xmm2 = high order part of second chunk: xmm7 left-shifted by 'len' bytes.
	lea	.Lbyteshift_table+16(%rip), %rax
	sub	len, %rax
	movdqu	(%rax), %xmm0
	pshufb	%xmm0, %xmm2

	# xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes.
	pxor	.Lmask1(%rip), %xmm0
	pshufb	%xmm0, %xmm7

	# xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes),
	# then '16-len' bytes from xmm2 (high-order bytes).
	pblendvb	%xmm2, %xmm1	# xmm0 is implicit

	# Fold the first chunk into the second chunk, storing the result in xmm7.
	movdqa	%xmm7, %xmm8
	pclmulqdq	$0x11, FOLD_CONSTS, %xmm7
	pclmulqdq	$0x00, FOLD_CONSTS, %xmm8
	pxor	%xmm8, %xmm7
	pxor	%xmm1, %xmm7

.Lreduce_final_16_bytes:
	# Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC.

	# Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
	movdqa	.Lfinal_fold_consts(%rip), FOLD_CONSTS

	# Fold the high 64 bits into the low 64 bits, while also multiplying by
	# x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
	# whose low 48 bits are 0.
	movdqa	%xmm7, %xmm0
	pclmulqdq	$0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x))
	pslldq	$8, %xmm0
	pxor	%xmm0, %xmm7			  # + low bits * x^64

	# Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
	# value congruent to x^64 * M(x) and whose low 48 bits are 0.
	movdqa	%xmm7, %xmm0
	pand	.Lmask2(%rip), %xmm0		  # zero high 32 bits
	psrldq	$12, %xmm7			  # extract high 32 bits
	pclmulqdq	$0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x))
	pxor	%xmm0, %xmm7			  # + low bits

	# Load G(x) and floor(x^48 / G(x)).
	movdqa	.Lbarrett_reduction_consts(%rip), FOLD_CONSTS

	# Use Barrett reduction to compute the final CRC value.
	movdqa	%xmm7, %xmm0
	pclmulqdq	$0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x))
	psrlq	$32, %xmm7			  # /= x^32
	pclmulqdq	$0x00, FOLD_CONSTS, %xmm7 # *= G(x)
	psrlq	$48, %xmm0
	pxor	%xmm7, %xmm0		     # + low 16 nonzero bits
	# Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
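	# (Barrett reduction in sketch form: with u the 48-bit value in bits
	# 48..95 of xmm7 before this step, the quotient estimate
	# q = floor(floor(u/x^16) * floor(x^48/G(x)) / x^32) approximates
	# floor(u / G(x)), and u mod G(x) drops out as the low 16 bits of
	# q*G(x) XOR u, addition being XOR in GF(2).)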

	pextrw	$0, %xmm0, %eax
	RET

.align 16
.Lless_than_256_bytes:
	# Checksumming a buffer of length 16...255 bytes

	# Load the first 16 data bytes.
	movdqu	(buf), %xmm7
	pshufb	BSWAP_MASK, %xmm7
	add	$16, buf

	# XOR the first 16 data *bits* with the initial CRC value.
	pxor	%xmm0, %xmm0
	pinsrw	$7, init_crc, %xmm0
	pxor	%xmm0, %xmm7

	movdqa	.Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
	cmp	$16, len
	je	.Lreduce_final_16_bytes		# len == 16
	sub	$32, len
	jge	.Lfold_16_bytes_loop		# 32 <= len <= 255
	add	$16, len
	jmp	.Lhandle_partial_segment	# 17 <= len <= 31
SYM_FUNC_END(crc_t10dif_pcl)

.section	.rodata, "a", @progbits
.align 16

# Fold constants precomputed from the polynomial 0x18bb7
# G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
.Lfold_across_128_bytes_consts:
	.quad		0x0000000000006123	# x^(8*128)	mod G(x)
	.quad		0x0000000000002295	# x^(8*128+64)	mod G(x)
.Lfold_across_64_bytes_consts:
	.quad		0x0000000000001069	# x^(4*128)	mod G(x)
	.quad		0x000000000000dd31	# x^(4*128+64)	mod G(x)
.Lfold_across_32_bytes_consts:
	.quad		0x000000000000857d	# x^(2*128)	mod G(x)
	.quad		0x0000000000007acc	# x^(2*128+64)	mod G(x)
.Lfold_across_16_bytes_consts:
	.quad		0x000000000000a010	# x^(1*128)	mod G(x)
	.quad		0x0000000000001faa	# x^(1*128+64)	mod G(x)
.Lfinal_fold_consts:
	.quad		0x1368000000000000	# x^48 * (x^48 mod G(x))
	.quad		0x2d56000000000000	# x^48 * (x^80 mod G(x))
.Lbarrett_reduction_consts:
	.quad		0x0000000000018bb7	# G(x)
	.quad		0x00000001f65a57f8	# floor(x^48 / G(x))
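
# Each "x^N mod G(x)" entry above can be regenerated by repeated
# multiply-by-x with reduction.  A minimal C sketch (hypothetical helper;
# e.g. xn_mod_g(8*128) should reproduce 0x6123):
#
#	static u16 xn_mod_g(unsigned int n)
#	{
#		u32 r = 1;			/* x^0 */
#		while (n--) {
#			r <<= 1;		/* r *= x */
#			if (r & 0x10000)	/* degree reached 16: */
#				r ^= 0x18bb7;	/* subtract (XOR) G(x) */
#		}
#		return r;
#	}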

.section	.rodata.cst16.mask1, "aM", @progbits, 16
.align 16
.Lmask1:
	.octa	0x80808080808080808080808080808080

.section	.rodata.cst16.mask2, "aM", @progbits, 16
.align 16
.Lmask2:
	.octa	0x00000000FFFFFFFFFFFFFFFFFFFFFFFF

.section	.rodata.cst16.bswap_mask, "aM", @progbits, 16
.align 16
.Lbswap_mask:
	.octa	0x000102030405060708090A0B0C0D0E0F

.section	.rodata.cst32.byteshift_table, "aM", @progbits, 32
.align 16
# For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - len]
# is the index vector to shift left by 'len' bytes, and is also {0x80, ...,
# 0x80} XOR the index vector to shift right by '16 - len' bytes.
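#
# For example (a sketch, with len = 3): the vector at &byteshift_table[13] is
# {0x8d, 0x8e, 0x8f, 0x0, 0x1, ..., 0xc}.  Used with pshufb, the 0x8x entries
# (high bit set) zero the three low destination bytes and the remaining
# entries copy source byte i-3 to destination byte i: a 3-byte left shift.
# XORed with 0x80 repeated, it becomes {0x0d, 0x0e, 0x0f, 0x80, ...}: a
# 13-byte right shift.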
.Lbyteshift_table:
	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0