/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_XOR_H
#define _ASM_X86_XOR_H

/*
 * Optimized RAID-5 checksumming functions for SSE.
 */

/*
 * Cache avoiding checksumming functions utilizing KNI instructions
 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
 */

/*
 * Based on
 * High-speed RAID5 checksumming functions utilizing SSE instructions.
 * Copyright (C) 1998 Ingo Molnar.
 */

/*
 * x86-64 changes / gcc fixes from Andi Kleen.
 * Copyright 2002 Andi Kleen, SuSE Labs.
 *
 * This hasn't been optimized for the hammer yet, but there are likely
 * no advantages to be gotten from x86-64 here anyways.
 */

#include <asm/fpu/api.h>

#ifdef CONFIG_X86_32
/* reduce register pressure */
# define XOR_CONSTANT_CONSTRAINT "i"
#else
# define XOR_CONSTANT_CONSTRAINT "re"
#endif

#define OFFS(x)		"16*("#x")"
#define PF_OFFS(x)	"256+16*("#x")"
#define PF0(x)		"	prefetchnta "PF_OFFS(x)"(%[p1])		;\n"
#define LD(x, y)	"	movaps "OFFS(x)"(%[p1]), %%xmm"#y"	;\n"
#define ST(x, y)	"	movaps %%xmm"#y", "OFFS(x)"(%[p1])	;\n"
#define PF1(x)		"	prefetchnta "PF_OFFS(x)"(%[p2])		;\n"
#define PF2(x)		"	prefetchnta "PF_OFFS(x)"(%[p3])		;\n"
#define PF3(x)		"	prefetchnta "PF_OFFS(x)"(%[p4])		;\n"
#define PF4(x)		"	prefetchnta "PF_OFFS(x)"(%[p5])		;\n"
#define XO1(x, y)	"	xorps "OFFS(x)"(%[p2]), %%xmm"#y"	;\n"
#define XO2(x, y)	"	xorps "OFFS(x)"(%[p3]), %%xmm"#y"	;\n"
#define XO3(x, y)	"	xorps "OFFS(x)"(%[p4]), %%xmm"#y"	;\n"
#define XO4(x, y)	"	xorps "OFFS(x)"(%[p5]), %%xmm"#y"	;\n"
#define NOP(x)
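
/*
 * Naming scheme for the helpers above: LD/ST move one 16-byte line between
 * memory and an XMM register, XOn XORs the corresponding line of source
 * buffer n+1 (p2..p5) into that register, and PFn issues a non-temporal
 * prefetch for source n+1.  OFFS(x) addresses 16-byte line x of the current
 * 256-byte chunk, while PF_OFFS(x) points one full chunk (256 bytes) ahead,
 * so prefetches run roughly one loop iteration in front of the loads.
 */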

#define BLK64(pf, op, i)			\
		pf(i)				\
		op(i, 0)			\
		op(i + 1, 1)			\
		op(i + 2, 2)			\
		op(i + 3, 3)
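
/*
 * For example, BLK64(PF0, LD, 0) expands to PF0(0) LD(0, 0) LD(1, 1)
 * LD(2, 2) LD(3, 3): one prefetch plus four 16-byte loads, i.e. one
 * 64-byte step of the unrolled loop.
 */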

static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 8;
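	/* Each loop iteration below handles one 256-byte chunk of each buffer. */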

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)			\
		LD(i, 0)		\
		LD(i + 1, 1)		\
		PF1(i)			\
		PF1(i + 2)		\
		LD(i + 2, 2)		\
		LD(i + 3, 3)		\
		PF0(i + 4)		\
		PF0(i + 6)		\
		XO1(i, 0)		\
		XO1(i + 1, 1)		\
		XO1(i + 2, 2)		\
		XO1(i + 3, 3)		\
		ST(i, 0)		\
		ST(i + 1, 1)		\
		ST(i + 2, 2)		\
		ST(i + 3, 3)		\


	PF0(0)
	PF0(2)

	" .align 32			;\n"
	" 1:				;\n"

		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"	add %[inc], %[p1]	;\n"
	"	add %[inc], %[p2]	;\n"
	"	dec %[cnt]		;\n"
	"	jnz 1b			;\n"
	: [cnt] "+r" (lines),
	  [p1] "+r" (p1), [p2] "+r" (p2)
	: [inc] XOR_CONSTANT_CONSTRAINT (256UL)
	: "memory");

	kernel_fpu_end();
}

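/*
 * The *_pf64 variants below use the same 256-byte inner loop but schedule
 * work in 64-byte groups via BLK64(): each source is prefetched and then
 * consumed one 64-byte block at a time, instead of interleaving prefetches
 * with the individual 16-byte operations as above.
 */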
static void
xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes >> 8;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)			\
		BLK64(PF0, LD, i)	\
		BLK64(PF1, XO1, i)	\
		BLK64(NOP, ST, i)	\

	" .align 32			;\n"
	" 1:				;\n"

		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"	add %[inc], %[p1]	;\n"
	"	add %[inc], %[p2]	;\n"
	"	dec %[cnt]		;\n"
	"	jnz 1b			;\n"
	: [cnt] "+r" (lines),
	  [p1] "+r" (p1), [p2] "+r" (p2)
	: [inc] XOR_CONSTANT_CONSTRAINT (256UL)
	: "memory");

	kernel_fpu_end();
}

static void
xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3)
{
	unsigned long lines = bytes >> 8;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)			\
		PF1(i)			\
		PF1(i + 2)		\
		LD(i, 0)		\
		LD(i + 1, 1)		\
		LD(i + 2, 2)		\
		LD(i + 3, 3)		\
		PF2(i)			\
		PF2(i + 2)		\
		PF0(i + 4)		\
		PF0(i + 6)		\
		XO1(i, 0)		\
		XO1(i + 1, 1)		\
		XO1(i + 2, 2)		\
		XO1(i + 3, 3)		\
		XO2(i, 0)		\
		XO2(i + 1, 1)		\
		XO2(i + 2, 2)		\
		XO2(i + 3, 3)		\
		ST(i, 0)		\
		ST(i + 1, 1)		\
		ST(i + 2, 2)		\
		ST(i + 3, 3)		\


	PF0(0)
	PF0(2)

	" .align 32			;\n"
	" 1:				;\n"

		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"	add %[inc], %[p1]	;\n"
	"	add %[inc], %[p2]	;\n"
	"	add %[inc], %[p3]	;\n"
	"	dec %[cnt]		;\n"
	"	jnz 1b			;\n"
	: [cnt] "+r" (lines),
	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
	: [inc] XOR_CONSTANT_CONSTRAINT (256UL)
	: "memory");

	kernel_fpu_end();
}

static void
xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	       unsigned long *p3)
{
	unsigned long lines = bytes >> 8;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)			\
		BLK64(PF0, LD, i)	\
		BLK64(PF1, XO1, i)	\
		BLK64(PF2, XO2, i)	\
		BLK64(NOP, ST, i)	\

	" .align 32			;\n"
	" 1:				;\n"

		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"	add %[inc], %[p1]	;\n"
	"	add %[inc], %[p2]	;\n"
	"	add %[inc], %[p3]	;\n"
	"	dec %[cnt]		;\n"
	"	jnz 1b			;\n"
	: [cnt] "+r" (lines),
	  [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
	: [inc] XOR_CONSTANT_CONSTRAINT (256UL)
	: "memory");

	kernel_fpu_end();
}

static void
xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 8;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)			\
		PF1(i)			\
		PF1(i + 2)		\
		LD(i, 0)		\
		LD(i + 1, 1)		\
		LD(i + 2, 2)		\
		LD(i + 3, 3)		\
		PF2(i)			\
		PF2(i + 2)		\
		XO1(i, 0)		\
		XO1(i + 1, 1)		\
		XO1(i + 2, 2)		\
		XO1(i + 3, 3)		\
		PF3(i)			\
		PF3(i + 2)		\
		PF0(i + 4)		\
		PF0(i + 6)		\
		XO2(i, 0)		\
		XO2(i + 1, 1)		\
		XO2(i + 2, 2)		\
		XO2(i + 3, 3)		\
		XO3(i, 0)		\
		XO3(i + 1, 1)		\
		XO3(i + 2, 2)		\
		XO3(i + 3, 3)		\
		ST(i, 0)		\
		ST(i + 1, 1)		\
		ST(i + 2, 2)		\
		ST(i + 3, 3)		\


	PF0(0)
	PF0(2)

	" .align 32			;\n"
	" 1:				;\n"

		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"	add %[inc], %[p1]	;\n"
	"	add %[inc], %[p2]	;\n"
	"	add %[inc], %[p3]	;\n"
	"	add %[inc], %[p4]	;\n"
	"	dec %[cnt]		;\n"
	"	jnz 1b			;\n"
	: [cnt] "+r" (lines), [p1] "+r" (p1),
	  [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
	: [inc] XOR_CONSTANT_CONSTRAINT (256UL)
	: "memory");

	kernel_fpu_end();
}

static void
xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	       unsigned long *p3, unsigned long *p4)
{
	unsigned long lines = bytes >> 8;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)			\
		BLK64(PF0, LD, i)	\
		BLK64(PF1, XO1, i)	\
		BLK64(PF2, XO2, i)	\
		BLK64(PF3, XO3, i)	\
		BLK64(NOP, ST, i)	\

	" .align 32			;\n"
	" 1:				;\n"

		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"	add %[inc], %[p1]	;\n"
	"	add %[inc], %[p2]	;\n"
	"	add %[inc], %[p3]	;\n"
	"	add %[inc], %[p4]	;\n"
	"	dec %[cnt]		;\n"
	"	jnz 1b			;\n"
	: [cnt] "+r" (lines), [p1] "+r" (p1),
	  [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
	: [inc] XOR_CONSTANT_CONSTRAINT (256UL)
	: "memory");

	kernel_fpu_end();
}

static void
xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	  unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 8;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)			\
		PF1(i)			\
		PF1(i + 2)		\
		LD(i, 0)		\
		LD(i + 1, 1)		\
		LD(i + 2, 2)		\
		LD(i + 3, 3)		\
		PF2(i)			\
		PF2(i + 2)		\
		XO1(i, 0)		\
		XO1(i + 1, 1)		\
		XO1(i + 2, 2)		\
		XO1(i + 3, 3)		\
		PF3(i)			\
		PF3(i + 2)		\
		XO2(i, 0)		\
		XO2(i + 1, 1)		\
		XO2(i + 2, 2)		\
		XO2(i + 3, 3)		\
		PF4(i)			\
		PF4(i + 2)		\
		PF0(i + 4)		\
		PF0(i + 6)		\
		XO3(i, 0)		\
		XO3(i + 1, 1)		\
		XO3(i + 2, 2)		\
		XO3(i + 3, 3)		\
		XO4(i, 0)		\
		XO4(i + 1, 1)		\
		XO4(i + 2, 2)		\
		XO4(i + 3, 3)		\
		ST(i, 0)		\
		ST(i + 1, 1)		\
		ST(i + 2, 2)		\
		ST(i + 3, 3)		\


	PF0(0)
	PF0(2)

	" .align 32			;\n"
	" 1:				;\n"

		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"	add %[inc], %[p1]	;\n"
	"	add %[inc], %[p2]	;\n"
	"	add %[inc], %[p3]	;\n"
	"	add %[inc], %[p4]	;\n"
	"	add %[inc], %[p5]	;\n"
	"	dec %[cnt]		;\n"
	"	jnz 1b			;\n"
	: [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
	  [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
	: [inc] XOR_CONSTANT_CONSTRAINT (256UL)
	: "memory");

	kernel_fpu_end();
}

static void
xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
	       unsigned long *p3, unsigned long *p4, unsigned long *p5)
{
	unsigned long lines = bytes >> 8;

	kernel_fpu_begin();

	asm volatile(
#undef BLOCK
#define BLOCK(i)			\
		BLK64(PF0, LD, i)	\
		BLK64(PF1, XO1, i)	\
		BLK64(PF2, XO2, i)	\
		BLK64(PF3, XO3, i)	\
		BLK64(PF4, XO4, i)	\
		BLK64(NOP, ST, i)	\

	" .align 32			;\n"
	" 1:				;\n"

		BLOCK(0)
		BLOCK(4)
		BLOCK(8)
		BLOCK(12)

	"	add %[inc], %[p1]	;\n"
	"	add %[inc], %[p2]	;\n"
	"	add %[inc], %[p3]	;\n"
	"	add %[inc], %[p4]	;\n"
	"	add %[inc], %[p5]	;\n"
	"	dec %[cnt]		;\n"
	"	jnz 1b			;\n"
	: [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
	  [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
	: [inc] XOR_CONSTANT_CONSTRAINT (256UL)
	: "memory");

	kernel_fpu_end();
}

static struct xor_block_template xor_block_sse_pf64 = {
	.name = "prefetch64-sse",
	.do_2 = xor_sse_2_pf64,
	.do_3 = xor_sse_3_pf64,
	.do_4 = xor_sse_4_pf64,
	.do_5 = xor_sse_5_pf64,
};
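
/*
 * Illustrative only: each do_N hook XORs the N-1 source buffers into p1 in
 * place, 256 bytes per loop iteration, so a caller that has already selected
 * this template might do something like
 *
 *	xor_block_sse_pf64.do_2(PAGE_SIZE, dest, src);
 *
 * with 16-byte-aligned buffers whose length is a multiple of 256 bytes
 * (dest and src here are hypothetical names for the caller's buffers).
 */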

#undef LD
#undef XO1
#undef XO2
#undef XO3
#undef XO4
#undef ST
#undef NOP
#undef BLK64
#undef BLOCK

#undef XOR_CONSTANT_CONSTRAINT

#ifdef CONFIG_X86_32
# include <asm/xor_32.h>
#else
# include <asm/xor_64.h>
#endif

#define XOR_SELECT_TEMPLATE(FASTEST) \
	AVX_SELECT(FASTEST)
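
/*
 * Note: the generic xor code applies XOR_SELECT_TEMPLATE() to the fastest
 * benchmarked template; AVX_SELECT() (defined via the headers included
 * above) is expected to substitute the AVX implementation when the CPU
 * supports it and otherwise keep FASTEST.
 */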

#endif /* _ASM_X86_XOR_H */