/* SPDX-License-Identifier: GPL-2.0 */
/* checksum.S: Sparc V9 optimized checksum code.
 *
 *  Copyright(C) 1995 Linus Torvalds
 *  Copyright(C) 1995 Miguel de Icaza
 *  Copyright(C) 1996, 2000 David S. Miller
 *  Copyright(C) 1997 Jakub Jelinek
 *
 * derived from:
 *      Linux/Alpha checksum c-code
 *      Linux/ix86 inline checksum assembly
 *      RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
 *      David Mosberger-Tang for optimized reference c-code
 *      BSD4.4 portable checksum routine
 */

#include <asm/export.h>
	.text

csum_partial_fix_alignment:
	/* We checked for zero length already, so there must be
	 * at least one byte.
	 */
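	/* If the start address is odd, consume one byte; if it is
	 * still 2-byte misaligned after that, consume one halfword.
	 * This leaves %o0 word (4-byte) aligned for the loops below.
	 */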
	be,pt		%icc, 1f
	 nop
	ldub		[%o0 + 0x00], %o4
	add		%o0, 1, %o0
	sub		%o1, 1, %o1
1:	andcc		%o0, 0x2, %g0
	be,pn		%icc, csum_partial_post_align
	 cmp		%o1, 2
	blu,pn		%icc, csum_partial_end_cruft
	 nop
	lduh		[%o0 + 0x00], %o5
	add		%o0, 2, %o0
	sub		%o1, 2, %o1
	ba,pt		%xcc, csum_partial_post_align
	 add		%o5, %o4, %o4

	.align		32
	.globl		csum_partial
	.type		csum_partial,#function
	EXPORT_SYMBOL(csum_partial)
csum_partial:		/* %o0=buff, %o1=len, %o2=sum */
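	/* C-level prototype (declared via asm/checksum.h):
	 *
	 *	__wsum csum_partial(const void *buff, int len, __wsum sum);
	 */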
	prefetch	[%o0 + 0x000], #n_reads
	clr		%o4
	prefetch	[%o0 + 0x040], #n_reads
	brz,pn		%o1, csum_partial_finish
	 andcc		%o0, 0x3, %g0

	/* We "remember" whether the lowest bit in the address
	 * was set in %g7.  Because if it is, we have to swap
	 * upper and lower 8 bit fields of the sum we calculate.
	 */
	bne,pn		%icc, csum_partial_fix_alignment
	 andcc		%o0, 0x1, %g7
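	/* In C terms the swap done at the end (see the tail of
	 * csum_partial_end_cruft) is roughly:
	 *
	 *	sum16 = ((sum16 & 0xff) << 8) | (sum16 >> 8);
	 *
	 * since starting one byte early lands every byte in the
	 * opposite half of its 16-bit word.
	 */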

csum_partial_post_align:
	prefetch	[%o0 + 0x080], #n_reads
	andncc		%o1, 0x3f, %o3

	prefetch	[%o0 + 0x0c0], #n_reads
	sub		%o1, %o3, %o1
	brz,pn		%o3, 2f
	 prefetch	[%o0 + 0x100], #n_reads

	/* So that we don't need to use the non-pairing
	 * add-with-carry instructions we accumulate 32-bit
	 * values into a 64-bit register.  At the end of the
	 * loop we fold it down to 32-bits and so on.
	 */
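	/* Roughly, in C (a sketch of the idea only; buf32 and nwords
	 * are illustrative):
	 *
	 *	u64 acc = 0;
	 *	while (nwords--)
	 *		acc += *buf32++;		// carries collect above bit 31
	 *	acc = (acc >> 32) + (u32)acc;	// fold them back in
	 *	acc = (acc >> 32) + (u32)acc;	// the first fold may itself carry
	 *
	 * Each iteration of the loop below handles 0x40 bytes (16
	 * words), with loads and adds interleaved so they can issue
	 * in parallel.
	 */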
	prefetch	[%o0 + 0x140], #n_reads
1:	lduw		[%o0 + 0x00], %o5
	lduw		[%o0 + 0x04], %g1
	lduw		[%o0 + 0x08], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x0c], %g3
	add		%o4, %g1, %o4
	lduw		[%o0 + 0x10], %o5
	add		%o4, %g2, %o4
	lduw		[%o0 + 0x14], %g1
	add		%o4, %g3, %o4
	lduw		[%o0 + 0x18], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x1c], %g3
	add		%o4, %g1, %o4
	lduw		[%o0 + 0x20], %o5
	add		%o4, %g2, %o4
	lduw		[%o0 + 0x24], %g1
	add		%o4, %g3, %o4
	lduw		[%o0 + 0x28], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x2c], %g3
	add		%o4, %g1, %o4
	lduw		[%o0 + 0x30], %o5
	add		%o4, %g2, %o4
	lduw		[%o0 + 0x34], %g1
	add		%o4, %g3, %o4
	lduw		[%o0 + 0x38], %g2
	add		%o4, %o5, %o4
	lduw		[%o0 + 0x3c], %g3
	add		%o4, %g1, %o4
	prefetch	[%o0 + 0x180], #n_reads
	add		%o4, %g2, %o4
	subcc		%o3, 0x40, %o3
	add		%o0, 0x40, %o0
	bne,pt		%icc, 1b
	 add		%o4, %g3, %o4

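	/* Sum any remaining whole 32-bit words (len & 0x3c); at most
	 * three bytes are then left for csum_partial_end_cruft.
	 */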
2:	and		%o1, 0x3c, %o3
	brz,pn		%o3, 2f
	 sub		%o1, %o3, %o1
1:	lduw		[%o0 + 0x00], %o5
	subcc		%o3, 0x4, %o3
	add		%o0, 0x4, %o0
	bne,pt		%icc, 1b
	 add		%o4, %o5, %o4

2:
	/* fold 64-->32 */
	srlx		%o4, 32, %o5
	srl		%o4, 0, %o4
	add		%o4, %o5, %o4
	srlx		%o4, 32, %o5
	srl		%o4, 0, %o4
	add		%o4, %o5, %o4
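	/* Folded twice because the first add can itself carry into
	 * the upper 32 bits; the second result always fits in 32.
	 */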

	/* fold 32-->16 */
	sethi		%hi(0xffff0000), %g1
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4
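	/* In C terms (sketch):
	 *
	 *	sum = (sum >> 16) + (sum & 0xffff);	// may reach 17 bits
	 *	sum = (sum >> 16) + (sum & 0xffff);	// now fits in 16 bits
	 */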

csum_partial_end_cruft:
	/* %o4 has the 16-bit sum we have calculated so far.  */
	cmp		%o1, 2
	blu,pt		%icc, 1f
	 nop
	lduh		[%o0 + 0x00], %o5
	sub		%o1, 2, %o1
	add		%o0, 2, %o0
	add		%o4, %o5, %o4
1:	brz,pt		%o1, 1f
	 nop
	ldub		[%o0 + 0x00], %o5
	sub		%o1, 1, %o1
	add		%o0, 1, %o0
	sllx		%o5, 8, %o5
	add		%o4, %o5, %o4
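	/* The checksum is big-endian: a lone trailing byte forms the
	 * high half of the final 16-bit word, hence the shift by 8.
	 */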
1:
	/* fold 32-->16 */
	sethi		%hi(0xffff0000), %g1
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4
	srl		%o4, 16, %o5
	andn		%o4, %g1, %g2
	add		%o5, %g2, %o4

1:	brz,pt		%g7, 1f
	 nop

	/* We started with an odd byte, byte-swap the result.  */
	srl		%o4, 8, %o5
	and		%o4, 0xff, %g1
	sll		%g1, 8, %g1
	or		%o5, %g1, %o4

1:	addcc		%o2, %o4, %o2
	addc		%g0, %o2, %o2
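	/* Fold the 16-bit partial into the caller's 32-bit sum with
	 * an end-around carry; in C terms (sketch):
	 *
	 *	sum += part;
	 *	sum += (sum < part);	// add the carry back in
	 */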

csum_partial_finish:
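	/* The delay slot doubles as the move to the return register:
	 * srl by zero copies %o2 to %o0 and clears the upper 32 bits.
	 */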
	retl
	 srl		%o2, 0, %o0