// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/sse1.c
 *
 * SSE-1/MMXEXT implementation of RAID-6 syndrome functions
 *
 * This is really an MMX implementation, but it requires SSE-1 or
 * AMD MMXEXT for prefetch support and a few other features.  The
 * support for nontemporal memory accesses is enough to make this
 * worthwhile as a separate implementation.
 */
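
/*
 * As a rough scalar sketch, the gen_syndrome routines below compute,
 * for each byte offset d (gf_mul2() is only a stand-in for the
 * pcmpgtb/paddb/pand/pxor sequence in the assembly, which multiplies
 * every byte by {02} in GF(2^8)):
 *
 *	pv = qv = dptr[z0][d];
 *	for (z = z0 - 1; z >= 0; z--) {
 *		qv = gf_mul2(qv);	   (Horner step for the Q syndrome)
 *		pv ^= dptr[z][d];	   (P is plain XOR parity)
 *		qv ^= dptr[z][d];
 *	}
 *	p[d] = pv;
 *	q[d] = qv;
 */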

#ifdef CONFIG_X86_32

#include <linux/raid/pq.h>
#include "x86.h"

/*
 * Defined in raid6/mmx.c: x1d holds the GF(2^8) reduction byte 0x1d
 * replicated into each byte lane of the 64-bit word.
 */
extern const struct raid6_mmx_constants {
	u64 x1d;
} raid6_mmx_constants;

static int raid6_have_sse1_or_mmxext(void)
{
	/* Not really boot_cpu but "all_cpus" */
	return boot_cpu_has(X86_FEATURE_MMX) &&
		(boot_cpu_has(X86_FEATURE_XMM) ||
		 boot_cpu_has(X86_FEATURE_MMXEXT));
}

/*
 * Plain SSE1 implementation
 */
static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
	asm volatile("pxor %mm5,%mm5");	/* Zero temp */

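	/*
	 * Each pcmpgtb/paddb/pand/pxor group below multiplies the Q
	 * accumulator (%mm4) by {02} in GF(2^8): pcmpgtb against the
	 * zeroed %mm5 sets 0xff in every byte whose top bit is set,
	 * paddb doubles each byte (dropping that top bit), pand masks
	 * the overflow bytes with the x1d constant in %mm0, and pxor
	 * folds the reduction back in before %mm5 is re-zeroed.
	 */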
	for ( d = 0 ; d < bytes ; d += 8 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("movq %mm2,%mm4");	/* Q[0] */
		asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d]));
		for ( z = z0-2 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %mm4,%mm5");
			asm volatile("paddb %mm4,%mm4");
			asm volatile("pand %mm0,%mm5");
			asm volatile("pxor %mm5,%mm4");
			asm volatile("pxor %mm5,%mm5");
			asm volatile("pxor %mm6,%mm2");
			asm volatile("pxor %mm6,%mm4");
			asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
		}
		asm volatile("pcmpgtb %mm4,%mm5");
		asm volatile("paddb %mm4,%mm4");
		asm volatile("pand %mm0,%mm5");
		asm volatile("pxor %mm5,%mm4");
		asm volatile("pxor %mm5,%mm5");
		asm volatile("pxor %mm6,%mm2");
		asm volatile("pxor %mm6,%mm4");

		asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
		asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse1x1 = {
	raid6_sse11_gen_syndrome,
	NULL,			/* XOR not yet implemented */
	raid6_have_sse1_or_mmxext,
	"sse1x1",
	1			/* Has cache hints */
};

/*
 * Unrolled-by-2 SSE1 implementation
 */
static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
	asm volatile("pxor %mm5,%mm5");	/* Zero temp */
	asm volatile("pxor %mm7,%mm7");	/* Zero temp */

	/* We uniformly assume a single prefetch covers at least 16 bytes */
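	/*
	 * Same Horner recurrence as above, but two 8-byte lanes per
	 * iteration: %mm2/%mm3 accumulate P, %mm4/%mm6 accumulate Q,
	 * while %mm5/%mm7 serve both as the overflow masks and as the
	 * freshly loaded data words before being zeroed again at the
	 * bottom of the inner loop.
	 */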
	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d]));   /* P[0] */
		asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */
		asm volatile("movq %mm2,%mm4");	/* Q[0] */
		asm volatile("movq %mm3,%mm6");	/* Q[1] */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %mm4,%mm5");
			asm volatile("pcmpgtb %mm6,%mm7");
			asm volatile("paddb %mm4,%mm4");
			asm volatile("paddb %mm6,%mm6");
			asm volatile("pand %mm0,%mm5");
			asm volatile("pand %mm0,%mm7");
			asm volatile("pxor %mm5,%mm4");
			asm volatile("pxor %mm7,%mm6");
			asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
			asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
			asm volatile("pxor %mm5,%mm2");
			asm volatile("pxor %mm7,%mm3");
			asm volatile("pxor %mm5,%mm4");
			asm volatile("pxor %mm7,%mm6");
			asm volatile("pxor %mm5,%mm5");
			asm volatile("pxor %mm7,%mm7");
		}
		asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
		asm volatile("movntq %%mm3,%0" : "=m" (p[d+8]));
		asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
		asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}

const struct raid6_calls raid6_sse1x2 = {
	raid6_sse12_gen_syndrome,
	NULL,			/* XOR not yet implemented */
	raid6_have_sse1_or_mmxext,
	"sse1x2",
	1			/* Has cache hints */
};

#endif