/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hardware-accelerated CRC-32 variants for Linux on z Systems
 *
 * Use the z/Architecture Vector Extension Facility to accelerate the
 * computing of bitreflected CRC-32 checksums for IEEE 802.3 Ethernet
 * and Castagnoli.
 *
 * This CRC-32 implementation algorithm is bitreflected and processes
 * the least-significant bit first (Little-Endian).
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/vx-insn.h>

/* Vector register range containing CRC-32 constants */
#define CONST_PERM_LE2BE	%v9
#define CONST_R2R1		%v10
#define CONST_R4R3		%v11
#define CONST_R5		%v12
#define CONST_RU_POLY		%v13
#define CONST_CRC_POLY		%v14

.data
.align 8

/*
 * The CRC-32 constant block contains reduction constants to fold and
 * process particular chunks of the input data stream in parallel.
 *
 * For the CRC-32 variants, the constants are precomputed according to
 * these definitions:
 *
 *	R1 = [(x4*128+32 mod P'(x) << 32)]' << 1
 *	R2 = [(x4*128-32 mod P'(x) << 32)]' << 1
 *	R3 = [(x128+32 mod P'(x) << 32)]' << 1
 *	R4 = [(x128-32 mod P'(x) << 32)]' << 1
 *	R5 = [(x64 mod P'(x) << 32)]' << 1
 *	R6 = [(x32 mod P'(x) << 32)]' << 1
 *
 * The bitreflected Barrett reduction constant, u', is defined as
 * the bit reversal of floor(x**64 / P(x)).
 *
 * where P(x) is the polynomial in the normal domain and the P'(x) is the
 * polynomial in the reversed (bitreflected) domain.
 *
 * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
 *
 *	P(x)  = 0x04C11DB7
 *	P'(x) = 0xEDB88320
 *
 * CRC-32C (Castagnoli) polynomials:
 *
 *	P(x)  = 0x1EDC6F41
 *	P'(x) = 0x82F63B78
 */

.Lconstants_CRC_32_LE:
	.octa	0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
	.quad	0x1c6e41596, 0x154442bd4		# R2, R1
	.quad	0x0ccaa009e, 0x1751997d0		# R4, R3
	.octa	0x163cd6124				# R5
	.octa	0x1F7011641				# u'
	.octa	0x1DB710641				# P'(x) << 1

.Lconstants_CRC_32C_LE:
	.octa	0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
	.quad	0x09e4addf8, 0x740eef02			# R2, R1
	.quad	0x14cd00bd6, 0xf20c0dfe			# R4, R3
	.octa	0x0dd45aab8				# R5
	.octa	0x0dea713f1				# u'
	.octa	0x105ec76f0				# P'(x) << 1

.previous

	GEN_BR_THUNK %r14

.text

/*
 * The CRC-32 functions use these calling conventions:
 *
 * Parameters:
 *
 *	%r2:	Initial CRC value, typically ~0; and final CRC (return) value.
 *	%r3:	Input buffer pointer, performance might be improved if the
 *		buffer is on a doubleword boundary.
 *	%r4:	Length of the buffer, must be 64 bytes or greater.
 *
 * Register usage:
 *
 *	%r5:	  CRC-32 constant pool base pointer.
 *	V0:	  Initial CRC value and intermediate constants and results.
 *	V1..V4:	  Data for CRC computation.
 *	V5..V8:	  Next data chunks that are fetched from the input buffer.
 *	V9:	  Constant for BE->LE conversion and shift operations
 *
 *	V10..V14: CRC-32 constants.
103*4882a593Smuzhiyun */ 104*4882a593Smuzhiyun 105*4882a593SmuzhiyunENTRY(crc32_le_vgfm_16) 106*4882a593Smuzhiyun larl %r5,.Lconstants_CRC_32_LE 107*4882a593Smuzhiyun j crc32_le_vgfm_generic 108*4882a593SmuzhiyunENDPROC(crc32_le_vgfm_16) 109*4882a593Smuzhiyun 110*4882a593SmuzhiyunENTRY(crc32c_le_vgfm_16) 111*4882a593Smuzhiyun larl %r5,.Lconstants_CRC_32C_LE 112*4882a593Smuzhiyun j crc32_le_vgfm_generic 113*4882a593SmuzhiyunENDPROC(crc32c_le_vgfm_16) 114*4882a593Smuzhiyun 115*4882a593SmuzhiyunENTRY(crc32_le_vgfm_generic) 116*4882a593Smuzhiyun /* Load CRC-32 constants */ 117*4882a593Smuzhiyun VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5 118*4882a593Smuzhiyun 119*4882a593Smuzhiyun /* 120*4882a593Smuzhiyun * Load the initial CRC value. 121*4882a593Smuzhiyun * 122*4882a593Smuzhiyun * The CRC value is loaded into the rightmost word of the 123*4882a593Smuzhiyun * vector register and is later XORed with the LSB portion 124*4882a593Smuzhiyun * of the loaded input data. 125*4882a593Smuzhiyun */ 126*4882a593Smuzhiyun VZERO %v0 /* Clear V0 */ 127*4882a593Smuzhiyun VLVGF %v0,%r2,3 /* Load CRC into rightmost word */ 128*4882a593Smuzhiyun 129*4882a593Smuzhiyun /* Load a 64-byte data chunk and XOR with CRC */ 130*4882a593Smuzhiyun VLM %v1,%v4,0,%r3 /* 64-bytes into V1..V4 */ 131*4882a593Smuzhiyun VPERM %v1,%v1,%v1,CONST_PERM_LE2BE 132*4882a593Smuzhiyun VPERM %v2,%v2,%v2,CONST_PERM_LE2BE 133*4882a593Smuzhiyun VPERM %v3,%v3,%v3,CONST_PERM_LE2BE 134*4882a593Smuzhiyun VPERM %v4,%v4,%v4,CONST_PERM_LE2BE 135*4882a593Smuzhiyun 136*4882a593Smuzhiyun VX %v1,%v0,%v1 /* V1 ^= CRC */ 137*4882a593Smuzhiyun aghi %r3,64 /* BUF = BUF + 64 */ 138*4882a593Smuzhiyun aghi %r4,-64 /* LEN = LEN - 64 */ 139*4882a593Smuzhiyun 140*4882a593Smuzhiyun cghi %r4,64 141*4882a593Smuzhiyun jl .Lless_than_64bytes 142*4882a593Smuzhiyun 143*4882a593Smuzhiyun.Lfold_64bytes_loop: 144*4882a593Smuzhiyun /* Load the next 64-byte data chunk into V5 to V8 */ 145*4882a593Smuzhiyun VLM %v5,%v8,0,%r3 146*4882a593Smuzhiyun VPERM 
%v5,%v5,%v5,CONST_PERM_LE2BE 147*4882a593Smuzhiyun VPERM %v6,%v6,%v6,CONST_PERM_LE2BE 148*4882a593Smuzhiyun VPERM %v7,%v7,%v7,CONST_PERM_LE2BE 149*4882a593Smuzhiyun VPERM %v8,%v8,%v8,CONST_PERM_LE2BE 150*4882a593Smuzhiyun 151*4882a593Smuzhiyun /* 152*4882a593Smuzhiyun * Perform a GF(2) multiplication of the doublewords in V1 with 153*4882a593Smuzhiyun * the R1 and R2 reduction constants in V0. The intermediate result 154*4882a593Smuzhiyun * is then folded (accumulated) with the next data chunk in V5 and 155*4882a593Smuzhiyun * stored in V1. Repeat this step for the register contents 156*4882a593Smuzhiyun * in V2, V3, and V4 respectively. 157*4882a593Smuzhiyun */ 158*4882a593Smuzhiyun VGFMAG %v1,CONST_R2R1,%v1,%v5 159*4882a593Smuzhiyun VGFMAG %v2,CONST_R2R1,%v2,%v6 160*4882a593Smuzhiyun VGFMAG %v3,CONST_R2R1,%v3,%v7 161*4882a593Smuzhiyun VGFMAG %v4,CONST_R2R1,%v4,%v8 162*4882a593Smuzhiyun 163*4882a593Smuzhiyun aghi %r3,64 /* BUF = BUF + 64 */ 164*4882a593Smuzhiyun aghi %r4,-64 /* LEN = LEN - 64 */ 165*4882a593Smuzhiyun 166*4882a593Smuzhiyun cghi %r4,64 167*4882a593Smuzhiyun jnl .Lfold_64bytes_loop 168*4882a593Smuzhiyun 169*4882a593Smuzhiyun.Lless_than_64bytes: 170*4882a593Smuzhiyun /* 171*4882a593Smuzhiyun * Fold V1 to V4 into a single 128-bit value in V1. Multiply V1 with R3 172*4882a593Smuzhiyun * and R4 and accumulating the next 128-bit chunk until a single 128-bit 173*4882a593Smuzhiyun * value remains. 
174*4882a593Smuzhiyun */ 175*4882a593Smuzhiyun VGFMAG %v1,CONST_R4R3,%v1,%v2 176*4882a593Smuzhiyun VGFMAG %v1,CONST_R4R3,%v1,%v3 177*4882a593Smuzhiyun VGFMAG %v1,CONST_R4R3,%v1,%v4 178*4882a593Smuzhiyun 179*4882a593Smuzhiyun cghi %r4,16 180*4882a593Smuzhiyun jl .Lfinal_fold 181*4882a593Smuzhiyun 182*4882a593Smuzhiyun.Lfold_16bytes_loop: 183*4882a593Smuzhiyun 184*4882a593Smuzhiyun VL %v2,0,,%r3 /* Load next data chunk */ 185*4882a593Smuzhiyun VPERM %v2,%v2,%v2,CONST_PERM_LE2BE 186*4882a593Smuzhiyun VGFMAG %v1,CONST_R4R3,%v1,%v2 /* Fold next data chunk */ 187*4882a593Smuzhiyun 188*4882a593Smuzhiyun aghi %r3,16 189*4882a593Smuzhiyun aghi %r4,-16 190*4882a593Smuzhiyun 191*4882a593Smuzhiyun cghi %r4,16 192*4882a593Smuzhiyun jnl .Lfold_16bytes_loop 193*4882a593Smuzhiyun 194*4882a593Smuzhiyun.Lfinal_fold: 195*4882a593Smuzhiyun /* 196*4882a593Smuzhiyun * Set up a vector register for byte shifts. The shift value must 197*4882a593Smuzhiyun * be loaded in bits 1-4 in byte element 7 of a vector register. 198*4882a593Smuzhiyun * Shift by 8 bytes: 0x40 199*4882a593Smuzhiyun * Shift by 4 bytes: 0x20 200*4882a593Smuzhiyun */ 201*4882a593Smuzhiyun VLEIB %v9,0x40,7 202*4882a593Smuzhiyun 203*4882a593Smuzhiyun /* 204*4882a593Smuzhiyun * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes 205*4882a593Smuzhiyun * to move R4 into the rightmost doubleword and set the leftmost 206*4882a593Smuzhiyun * doubleword to 0x1. 207*4882a593Smuzhiyun */ 208*4882a593Smuzhiyun VSRLB %v0,CONST_R4R3,%v9 209*4882a593Smuzhiyun VLEIG %v0,1,0 210*4882a593Smuzhiyun 211*4882a593Smuzhiyun /* 212*4882a593Smuzhiyun * Compute GF(2) product of V1 and V0. The rightmost doubleword 213*4882a593Smuzhiyun * of V1 is multiplied with R4. The leftmost doubleword of V1 is 214*4882a593Smuzhiyun * multiplied by 0x1 and is then XORed with rightmost product. 
215*4882a593Smuzhiyun * Implicitly, the intermediate leftmost product becomes padded 216*4882a593Smuzhiyun */ 217*4882a593Smuzhiyun VGFMG %v1,%v0,%v1 218*4882a593Smuzhiyun 219*4882a593Smuzhiyun /* 220*4882a593Smuzhiyun * Now do the final 32-bit fold by multiplying the rightmost word 221*4882a593Smuzhiyun * in V1 with R5 and XOR the result with the remaining bits in V1. 222*4882a593Smuzhiyun * 223*4882a593Smuzhiyun * To achieve this by a single VGFMAG, right shift V1 by a word 224*4882a593Smuzhiyun * and store the result in V2 which is then accumulated. Use the 225*4882a593Smuzhiyun * vector unpack instruction to load the rightmost half of the 226*4882a593Smuzhiyun * doubleword into the rightmost doubleword element of V1; the other 227*4882a593Smuzhiyun * half is loaded in the leftmost doubleword. 228*4882a593Smuzhiyun * The vector register with CONST_R5 contains the R5 constant in the 229*4882a593Smuzhiyun * rightmost doubleword and the leftmost doubleword is zero to ignore 230*4882a593Smuzhiyun * the leftmost product of V1. 231*4882a593Smuzhiyun */ 232*4882a593Smuzhiyun VLEIB %v9,0x20,7 /* Shift by words */ 233*4882a593Smuzhiyun VSRLB %v2,%v1,%v9 /* Store remaining bits in V2 */ 234*4882a593Smuzhiyun VUPLLF %v1,%v1 /* Split rightmost doubleword */ 235*4882a593Smuzhiyun VGFMAG %v1,CONST_R5,%v1,%v2 /* V1 = (V1 * R5) XOR V2 */ 236*4882a593Smuzhiyun 237*4882a593Smuzhiyun /* 238*4882a593Smuzhiyun * Apply a Barret reduction to compute the final 32-bit CRC value. 239*4882a593Smuzhiyun * 240*4882a593Smuzhiyun * The input values to the Barret reduction are the degree-63 polynomial 241*4882a593Smuzhiyun * in V1 (R(x)), degree-32 generator polynomial, and the reduction 242*4882a593Smuzhiyun * constant u. The Barret reduction result is the CRC value of R(x) mod 243*4882a593Smuzhiyun * P(x). 244*4882a593Smuzhiyun * 245*4882a593Smuzhiyun * The Barret reduction algorithm is defined as: 246*4882a593Smuzhiyun * 247*4882a593Smuzhiyun * 1. 
T1(x) = floor( R(x) / x^32 ) GF2MUL u 248*4882a593Smuzhiyun * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) 249*4882a593Smuzhiyun * 3. C(x) = R(x) XOR T2(x) mod x^32 250*4882a593Smuzhiyun * 251*4882a593Smuzhiyun * Note: The leftmost doubleword of vector register containing 252*4882a593Smuzhiyun * CONST_RU_POLY is zero and, thus, the intermediate GF(2) product 253*4882a593Smuzhiyun * is zero and does not contribute to the final result. 254*4882a593Smuzhiyun */ 255*4882a593Smuzhiyun 256*4882a593Smuzhiyun /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */ 257*4882a593Smuzhiyun VUPLLF %v2,%v1 258*4882a593Smuzhiyun VGFMG %v2,CONST_RU_POLY,%v2 259*4882a593Smuzhiyun 260*4882a593Smuzhiyun /* 261*4882a593Smuzhiyun * Compute the GF(2) product of the CRC polynomial with T1(x) in 262*4882a593Smuzhiyun * V2 and XOR the intermediate result, T2(x), with the value in V1. 263*4882a593Smuzhiyun * The final result is stored in word element 2 of V2. 264*4882a593Smuzhiyun */ 265*4882a593Smuzhiyun VUPLLF %v2,%v2 266*4882a593Smuzhiyun VGFMAG %v2,CONST_CRC_POLY,%v2,%v1 267*4882a593Smuzhiyun 268*4882a593Smuzhiyun.Ldone: 269*4882a593Smuzhiyun VLGVF %r2,%v2,2 270*4882a593Smuzhiyun BR_EX %r14 271*4882a593SmuzhiyunENDPROC(crc32_le_vgfm_generic) 272*4882a593Smuzhiyun 273*4882a593Smuzhiyun.previous 274