/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sha2-ce-core.S - core SHA-224/SHA-256 transform using v8 Crypto Extensions
 *
 * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch		armv8-a+crypto

	/*
	 * Register aliases:
	 *   dga/dgav, dgb/dgbv - the running hash state (H0..H3, H4..H7)
	 *   t0, t1             - round-constant + schedule sums fed to sha256h
	 *   dg0/dg1            - working copies of the state for this block
	 *   dg2                - saved copy of dg0 needed by sha256h2
	 */
	dga		.req	q20
	dgav		.req	v20
	dgb		.req	q21
	dgbv		.req	v21

	t0		.req	v22
	t1		.req	v23

	dg0q		.req	q24
	dg0v		.req	v24
	dg1q		.req	q25
	dg1v		.req	v25
	dg2q		.req	q26
	dg2v		.req	v26

	/*
	 * Perform 4 rounds: consume the sum prepared in t0 or t1 (selected
	 * by \ev, alternating so the add for the next 4 rounds can be issued
	 * in parallel), and optionally prepare the next sum from v\s0 + \rc.
	 */
	.macro		add_only, ev, rc, s0
	mov		dg2v.16b, dg0v.16b
	.ifeq		\ev
	add		t1.4s, v\s0\().4s, \rc\().4s
	sha256h		dg0q, dg1q, t0.4s
	sha256h2	dg1q, dg2q, t0.4s
	.else
	.ifnb		\s0
	add		t0.4s, v\s0\().4s, \rc\().4s
	.endif
	sha256h		dg0q, dg1q, t1.4s
	sha256h2	dg1q, dg2q, t1.4s
	.endif
	.endm

	/*
	 * Same as add_only, but also advance the message schedule: update
	 * v\s0 from v\s1/v\s2/v\s3 via sha256su0/sha256su1.
	 */
	.macro		add_update, ev, rc, s0, s1, s2, s3
	sha256su0	v\s0\().4s, v\s1\().4s
	add_only	\ev, \rc, \s1
	sha256su1	v\s0\().4s, v\s2\().4s, v\s3\().4s
	.endm

	/*
	 * The SHA-256 round constants
	 */
	.section	".rodata", "a"
	.align		4
.Lsha2_rcon:
	.word		0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
	.word		0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
	.word		0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
	.word		0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
	.word		0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
	.word		0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
	.word		0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
	.word		0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
	.word		0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
	.word		0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
	.word		0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
	.word		0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
	.word		0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
	.word		0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
	.word		0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
	.word		0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2

	/*
	 * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
	 *			  int blocks)
	 *
	 * x0: state, x1: input, w2: number of 64-byte blocks.
	 * Returns (in w0) the number of blocks left unprocessed if the loop
	 * yielded early via cond_yield, or 0 when all blocks were consumed.
	 */
	.text
SYM_FUNC_START(sha2_ce_transform)
	/* load round constants */
	adr_l		x8, .Lsha2_rcon
	ld1		{ v0.4s- v3.4s}, [x8], #64
	ld1		{ v4.4s- v7.4s}, [x8], #64
	ld1		{ v8.4s-v11.4s}, [x8], #64
	ld1		{v12.4s-v15.4s}, [x8]

	/* load state */
	ld1		{dgav.4s, dgbv.4s}, [x0]

	/* load sha256_ce_state::finalize */
	ldr_l		w4, sha256_ce_offsetof_finalize, x4
	ldr		w4, [x0, x4]

	/* load input */
0:	ld1		{v16.4s-v19.4s}, [x1], #64
	sub		w2, w2, #1

	/* SHA-256 operates on big-endian words: byte-swap on LE CPUs */
CPU_LE(	rev32		v16.16b, v16.16b	)
CPU_LE(	rev32		v17.16b, v17.16b	)
CPU_LE(	rev32		v18.16b, v18.16b	)
CPU_LE(	rev32		v19.16b, v19.16b	)

1:	add		t0.4s, v16.4s, v0.4s
	mov		dg0v.16b, dgav.16b
	mov		dg1v.16b, dgbv.16b

	add_update	0,  v1, 16, 17, 18, 19
	add_update	1,  v2, 17, 18, 19, 16
	add_update	0,  v3, 18, 19, 16, 17
	add_update	1,  v4, 19, 16, 17, 18

	add_update	0,  v5, 16, 17, 18, 19
	add_update	1,  v6, 17, 18, 19, 16
	add_update	0,  v7, 18, 19, 16, 17
	add_update	1,  v8, 19, 16, 17, 18

	add_update	0,  v9, 16, 17, 18, 19
	add_update	1, v10, 17, 18, 19, 16
	add_update	0, v11, 18, 19, 16, 17
	add_update	1, v12, 19, 16, 17, 18

	/* final 16 rounds need no further schedule updates */
	add_only	0, v13, 17
	add_only	1, v14, 18
	add_only	0, v15, 19
	add_only	1

	/* update state */
	add		dgav.4s, dgav.4s, dg0v.4s
	add		dgbv.4s, dgbv.4s, dg1v.4s

	/* handled all input blocks? */
	cbz		w2, 2f
	cond_yield	3f, x5, x6
	b		0b

	/*
	 * Final block: add padding and total bit count.
	 * Skip if the input size was not a round multiple of the block size,
	 * the padding is handled by the C code in that case.
	 */
2:	cbz		x4, 3f
	ldr_l		w4, sha256_ce_offsetof_count, x4
	ldr		x4, [x0, x4]
	movi		v17.2d, #0
	mov		x8, #0x80000000
	movi		v18.2d, #0
	ror		x7, x4, #29		// ror(lsl(x4, 3), 32)
	fmov		d16, x8
	mov		x4, #0
	mov		v19.d[0], xzr
	mov		v19.d[1], x7
	b		1b

	/* store new state */
3:	st1		{dgav.4s, dgbv.4s}, [x0]
	mov		w0, w2
	ret
SYM_FUNC_END(sha2_ce_transform)