/*
 ---------------------------------------------------------------------------
 Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
 All rights reserved.

 LICENSE TERMS

 The free distribution and use of this software in both source and binary
 form is allowed (with or without changes) provided that:

   1. distributions of this source code include the above copyright
      notice, this list of conditions and the following disclaimer;

   2. distributions in binary form include the above copyright
      notice, this list of conditions and the following disclaimer
      in the documentation and/or other associated materials;

   3. the copyright holder's name is not used to endorse products
      built using this software without specific written permission.

 ALTERNATIVELY, provided that this notice is retained in full, this product
 may be distributed under the terms of the GNU General Public License (GPL),
 in which case the provisions of the GPL apply INSTEAD OF those given above.

 DISCLAIMER

 This software is provided 'as is' with no explicit or implied warranties
 in respect of its properties, including, but not limited to, correctness
 and/or fitness for purpose.
 ---------------------------------------------------------------------------
 Issue Date: 30/11/2002

 This is a byte oriented version of SHA2 that operates on arrays of bytes
 stored in memory. This code implements sha256, sha384 and sha512, but the
 latter two rely on 64-bit integer operations, which may not be efficient
 on 32-bit machines.

 The sha256 functions use a type 'sha256_ctx' to hold details of the
 current hash state and use the following three calls:

     void sha256_begin(sha256_ctx ctx[1])
     void sha256_hash(sha256_ctx ctx[1], const unsigned char data[],
                      unsigned long len)
     void sha256_end(sha256_ctx ctx[1], unsigned char hval[])

 The first subroutine initialises a hash computation by setting up the
 sha256_ctx context. The second hashes 8-bit bytes from the array data[]
 into the hash state within the sha256_ctx context, the number of bytes
 to be hashed being given by the unsigned long integer len. The third
 completes the hash calculation and places the resulting digest value in
 the array of 8-bit bytes hval[].

 The sha384 and sha512 functions are similar and use the interfaces:

     void sha384_begin(sha384_ctx ctx[1]);
     void sha384_hash(sha384_ctx ctx[1], const unsigned char data[],
                      unsigned long len);
     void sha384_end(sha384_ctx ctx[1], unsigned char hval[]);

     void sha512_begin(sha512_ctx ctx[1]);
     void sha512_hash(sha512_ctx ctx[1], const unsigned char data[],
                      unsigned long len);
     void sha512_end(sha512_ctx ctx[1], unsigned char hval[]);

 In addition there is an sha2 interface that selects one of these hashes
 at run time through a hash length parameter:

     int  sha2_begin(sha2_ctx ctx[1], unsigned long len);
     void sha2_hash(sha2_ctx ctx[1], const unsigned char data[],
                    unsigned long len);
     void sha2_end(sha2_ctx ctx[1], unsigned char hval[]);

 My thanks to Erik Andersen <andersen@codepoet.org> for testing this code
 on big-endian systems and for his assistance with corrections
*/
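
/* Usage sketch (an illustration added for clarity, not part of the original
   notes; 'msg' and 'msg_len' stand for a caller-supplied byte buffer and
   its length):

       unsigned char digest[SHA256_DIGEST_SIZE];
       sha256_ctx ctx[1];

       sha256_begin(ctx);
       sha256_hash(ctx, msg, msg_len);      (may be called repeatedly)
       sha256_end(ctx, digest);             (digest now holds 32 bytes)

   The sha384 and sha512 calls are used in the same way, and the sha2
   interface likewise after a successful sha2_begin(ctx, 32), (ctx, 48)
   or (ctx, 64).
*/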

/* define the hash functions that you need */

#define SHA_2 /* for dynamic hash length */
#define SHA_256
#define SHA_384
#define SHA_512

#ifdef USE_HOSTCC
#include <string.h> /* for memcpy() etc. */
#include <stdlib.h> /* for _lrotr with VC++ */
#endif

#include "sha2.h"

/* rockchip crypto byte order */
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN

/* 1. PLATFORM SPECIFIC INCLUDES */

// #if defined(__GNU_LIBRARY__)
// # include <byteswap.h>
// # include <endian.h>
// #elif defined(__CRYPTLIB__)
// # if defined( INC_ALL )
// # include "crypt.h"
// # elif defined( INC_CHILD )
// # include "../crypt.h"
// # else
// # include "crypt.h"
// # endif
// # if defined(DATA_LITTLEENDIAN)
// # define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
// # else
// # define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
// # endif
// #if defined(_MSC_VER)
// # include <stdlib.h>
// #elif !defined(WIN32)
// # include <stdlib.h>
// # if !defined (_ENDIAN_H)
// # include <sys/param.h>
// # else
// # include _ENDIAN_H
// # endif
// #endif

/* 2. BYTE ORDER IN 32-BIT WORDS

   To obtain the highest speed on processors with 32-bit words, this code
   needs to determine the order in which bytes are packed into such words.
   The following block of code is an attempt to capture the most obvious
   ways in which various environments specify their endian definitions.
   It may well fail, in which case the definitions will need to be set by
   editing at the points marked **** EDIT HERE IF NECESSARY **** below.
*/
#define SHA_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */
#define SHA_BIG_ENDIAN 4321 /* byte 0 is most significant (mc68k) */

#if !defined(PLATFORM_BYTE_ORDER)
#if defined(LITTLE_ENDIAN) || defined(BIG_ENDIAN)
#if defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
#if defined(BYTE_ORDER)
#if (BYTE_ORDER == LITTLE_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif (BYTE_ORDER == BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#endif
#elif defined(LITTLE_ENDIAN) && !defined(BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif !defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#elif defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)
#if defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
#if defined(_BYTE_ORDER)
#if (_BYTE_ORDER == _LITTLE_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif (_BYTE_ORDER == _BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#endif
#elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif !defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#elif 0 /* **** EDIT HERE IF NECESSARY **** */
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif 0 /* **** EDIT HERE IF NECESSARY **** */
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#elif (('1234' >> 24) == '1')
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif (('4321' >> 24) == '1')
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#endif

#if !defined(PLATFORM_BYTE_ORDER)
#error Please set the undetermined byte order at the points marked EDIT HERE above.
#endif

#ifdef _MSC_VER
#pragma intrinsic(memcpy)
#endif

#define rotr32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

#if !defined(bswap_32)
#define bswap_32(x) \
        ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00))
#endif
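
/* bswap_32 reverses the byte order of a 32-bit word using two rotates */
/* and masks; for example bswap_32(0x01020304) gives 0x04030201 (note  */
/* added for reference).                                               */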

#if (PLATFORM_BYTE_ORDER == SHA_LITTLE_ENDIAN)
#define SWAP_BYTES
#else
#undef SWAP_BYTES
#endif

#if defined(SHA_2) || defined(SHA_256)

#define SHA256_MASK (SHA256_BLOCK_SIZE - 1)

#if defined(SWAP_BYTES)
#define bsw_32(p, n) \
        { \
                int _i = (n); \
                while (_i--) \
                        p[_i] = bswap_32(p[_i]); \
        }
#else
#define bsw_32(p, n)
#endif

/* SHA256 mixing function definitions */

#define ch(x, y, z)  (((x) & (y)) ^ (~(x) & (z)))
#define maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

#define s256_0(x) (rotr32((x), 2) ^ rotr32((x), 13) ^ rotr32((x), 22))
#define s256_1(x) (rotr32((x), 6) ^ rotr32((x), 11) ^ rotr32((x), 25))
#define g256_0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3))
#define g256_1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10))
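
/* In the notation of FIPS 180-2 (note added for reference): s256_0 and */
/* s256_1 are the 'big' Sigma functions used in each round, and g256_0  */
/* and g256_1 are the 'small' sigma functions used to expand the        */
/* message schedule.                                                    */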

/* rotated SHA256 round definition. Rather than swapping variables as in */
/* FIPS-180, different variables are 'rotated' on each round, returning  */
/* to their starting positions every eight rounds                        */

#define h2(i) \
        ctx->wbuf[i & 15] += \
                g256_1(ctx->wbuf[(i + 14) & 15]) + ctx->wbuf[(i + 9) & 15] + \
                g256_0(ctx->wbuf[(i + 1) & 15])

#define h2_cycle(i, j) \
        v[(7 - i) & 7] += \
                (j ? h2(i) : ctx->wbuf[i & 15]) + k256[i + j] + \
                s256_1(v[(4 - i) & 7]) + \
                ch(v[(4 - i) & 7], v[(5 - i) & 7], v[(6 - i) & 7]); \
        v[(3 - i) & 7] += v[(7 - i) & 7]; \
        v[(7 - i) & 7] += s256_0(v[(0 - i) & 7]) + \
                maj(v[(0 - i) & 7], v[(1 - i) & 7], v[(2 - i) & 7])
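
/* Note (added for clarity): h2(i) updates the 16-word circular message   */
/* schedule in place, wbuf[i mod 16] += sigma1(w[i-2]) + w[i-7] +         */
/* sigma0(w[i-15]). In h2_cycle the variable playing the role of 'h' in   */
/* FIPS 180-2 round i is v[(7 - i) & 7], 'e' is v[(4 - i) & 7] and 'a' is */
/* v[(0 - i) & 7]; the role assignment shifts by one place each round and */
/* repeats every eight rounds, so no register values need to be moved.    */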

/* SHA256 mixing data */

const sha2_32t k256[64] = {
        n_u32(428a2f98), n_u32(71374491), n_u32(b5c0fbcf), n_u32(e9b5dba5),
        n_u32(3956c25b), n_u32(59f111f1), n_u32(923f82a4), n_u32(ab1c5ed5),
        n_u32(d807aa98), n_u32(12835b01), n_u32(243185be), n_u32(550c7dc3),
        n_u32(72be5d74), n_u32(80deb1fe), n_u32(9bdc06a7), n_u32(c19bf174),
        n_u32(e49b69c1), n_u32(efbe4786), n_u32(0fc19dc6), n_u32(240ca1cc),
        n_u32(2de92c6f), n_u32(4a7484aa), n_u32(5cb0a9dc), n_u32(76f988da),
        n_u32(983e5152), n_u32(a831c66d), n_u32(b00327c8), n_u32(bf597fc7),
        n_u32(c6e00bf3), n_u32(d5a79147), n_u32(06ca6351), n_u32(14292967),
        n_u32(27b70a85), n_u32(2e1b2138), n_u32(4d2c6dfc), n_u32(53380d13),
        n_u32(650a7354), n_u32(766a0abb), n_u32(81c2c92e), n_u32(92722c85),
        n_u32(a2bfe8a1), n_u32(a81a664b), n_u32(c24b8b70), n_u32(c76c51a3),
        n_u32(d192e819), n_u32(d6990624), n_u32(f40e3585), n_u32(106aa070),
        n_u32(19a4c116), n_u32(1e376c08), n_u32(2748774c), n_u32(34b0bcb5),
        n_u32(391c0cb3), n_u32(4ed8aa4a), n_u32(5b9cca4f), n_u32(682e6ff3),
        n_u32(748f82ee), n_u32(78a5636f), n_u32(84c87814), n_u32(8cc70208),
        n_u32(90befffa), n_u32(a4506ceb), n_u32(bef9a3f7), n_u32(c67178f2),
};

/* SHA256 initialisation data */

const sha2_32t i256[8] = {
        n_u32(6a09e667), n_u32(bb67ae85), n_u32(3c6ef372), n_u32(a54ff53a),
        n_u32(510e527f), n_u32(9b05688c), n_u32(1f83d9ab), n_u32(5be0cd19)
};

void sha256_begin(sha256_ctx ctx[1])
{
        ctx->count[0] = ctx->count[1] = 0;
        memcpy(ctx->hash, i256, 8 * sizeof(sha2_32t));
}

/* Compile 64 bytes of hash data into SHA256 digest value   */
/* NOTE: this routine assumes that the byte order in the    */
/* ctx->wbuf[] at this point is in such an order that low   */
/* address bytes in the ORIGINAL byte stream placed in this */
/* buffer will now go to the high end of words on BOTH big  */
/* and little endian systems                                */

void sha256_compile(sha256_ctx ctx[1])
{
        sha2_32t v[8], j;

        memcpy(v, ctx->hash, 8 * sizeof(sha2_32t));

        for (j = 0; j < 64; j += 16) {
                h2_cycle(0, j);
                h2_cycle(1, j);
                h2_cycle(2, j);
                h2_cycle(3, j);
                h2_cycle(4, j);
                h2_cycle(5, j);
                h2_cycle(6, j);
                h2_cycle(7, j);
                h2_cycle(8, j);
                h2_cycle(9, j);
                h2_cycle(10, j);
                h2_cycle(11, j);
                h2_cycle(12, j);
                h2_cycle(13, j);
                h2_cycle(14, j);
                h2_cycle(15, j);
        }

        ctx->hash[0] += v[0];
        ctx->hash[1] += v[1];
        ctx->hash[2] += v[2];
        ctx->hash[3] += v[3];
        ctx->hash[4] += v[4];
        ctx->hash[5] += v[5];
        ctx->hash[6] += v[6];
        ctx->hash[7] += v[7];
}

/* SHA256 hash data in an array of bytes into hash buffer */
/* and call the hash_compile function as required.        */

void sha256_hash(sha256_ctx ctx[1], const unsigned char data[],
                 unsigned long len)
{
        sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA256_MASK),
                 space = SHA256_BLOCK_SIZE - pos;
        const unsigned char *sp = data;

        if ((ctx->count[0] += len) < len)
                ++(ctx->count[1]);

        while (len >= space) { /* transfer whole blocks while possible */
                memcpy(((unsigned char *)ctx->wbuf) + pos, sp, space);
                sp += space;
                len -= space;
                space = SHA256_BLOCK_SIZE;
                pos = 0;
                bsw_32(ctx->wbuf, SHA256_BLOCK_SIZE >> 2);
                sha256_compile(ctx);
        }

        memcpy(((unsigned char *)ctx->wbuf) + pos, sp, len);
}

/* SHA256 Final padding and digest calculation */

static sha2_32t m1[4] = { n_u32(00000000), n_u32(ff000000), n_u32(ffff0000),
                          n_u32(ffffff00) };

static sha2_32t b1[4] = { n_u32(80000000), n_u32(00800000), n_u32(00008000),
                          n_u32(00000080) };
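
/* Note (added for clarity): with the buffer bytes arranged high-to-low */
/* within each 32-bit word (see bsw_32 above), m1[i & 3] masks off the  */
/* bytes of the final partial word that have not yet been filled, and   */
/* b1[i & 3] places the single 0x80 padding byte immediately after the  */
/* last message byte.                                                   */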

void sha256_end(sha256_ctx ctx[1], unsigned char hval[])
{
        sha2_32t i = (sha2_32t)(ctx->count[0] & SHA256_MASK);

        bsw_32(ctx->wbuf, (i + 3) >> 2)
        /* bytes in the buffer are now in an order in which references  */
        /* to 32-bit words will put bytes with lower addresses into the */
        /* top of 32 bit words on BOTH big and little endian machines   */

        /* we now need to mask valid bytes and add the padding which is */
        /* a single 1 bit and as many zero bits as necessary.           */
        ctx->wbuf[i >> 2] = (ctx->wbuf[i >> 2] & m1[i & 3]) | b1[i & 3];

        /* we need 9 or more empty positions, one for the padding byte  */
        /* (above) and eight for the length count. If there is not      */
        /* enough space pad and empty the buffer                        */
        if (i > SHA256_BLOCK_SIZE - 9) {
                if (i < 60)
                        ctx->wbuf[15] = 0;
                sha256_compile(ctx);
                i = 0;
        } else /* compute a word index for the empty buffer positions */
                i = (i >> 2) + 1;

        while (i < 14) /* and zero pad all but last two positions */
                ctx->wbuf[i++] = 0;

        /* the following 32-bit length fields are assembled in the */
        /* wrong byte order on little endian machines but this is  */
        /* corrected later since they are only ever used as 32-bit */
        /* word values.                                            */

        ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 29);
        ctx->wbuf[15] = ctx->count[0] << 3;

        sha256_compile(ctx);

        /* extract the hash value as bytes in case the hash buffer is */
        /* misaligned for 32-bit words                                */
        for (i = 0; i < SHA256_DIGEST_SIZE; ++i)
                hval[i] = (unsigned char)(ctx->hash[i >> 2] >> 8 * (~i & 3));
}

void sha256(unsigned char hval[], const unsigned char data[],
            unsigned long len)
{
        sha256_ctx cx[1];

        sha256_begin(cx);
        sha256_hash(cx, data, len);
        sha256_end(cx, hval);
}

#endif

#if defined(SHA_2) || defined(SHA_384) || defined(SHA_512)

#define SHA512_MASK (SHA512_BLOCK_SIZE - 1)

#define rotr64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))

#if !defined(bswap_64)
#define bswap_64(x) \
        (((sha2_64t)(bswap_32((sha2_32t)(x)))) << 32 | \
         bswap_32((sha2_32t)((x) >> 32)))
#endif

#if defined(SWAP_BYTES)
#define bsw_64(p, n) \
        { \
                int _i = (n); \
                while (_i--) \
                        p[_i] = bswap_64(p[_i]); \
        }
#else
#define bsw_64(p, n)
#endif

/* SHA512 mixing function definitions */

#define s512_0(x) (rotr64((x), 28) ^ rotr64((x), 34) ^ rotr64((x), 39))
#define s512_1(x) (rotr64((x), 14) ^ rotr64((x), 18) ^ rotr64((x), 41))
#define g512_0(x) (rotr64((x), 1) ^ rotr64((x), 8) ^ ((x) >> 7))
#define g512_1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6))
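
/* As for SHA256 above, s512_0/s512_1 are the FIPS 180-2 'big' Sigma     */
/* functions of the SHA512 round and g512_0/g512_1 are the 'small' sigma */
/* functions of the SHA512 message schedule (note added for reference).  */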

/* rotated SHA512 round definition. Rather than swapping variables as in */
/* FIPS-180, different variables are 'rotated' on each round, returning  */
/* to their starting positions every eight rounds                        */

#define h5(i) \
        ctx->wbuf[i & 15] += \
                g512_1(ctx->wbuf[(i + 14) & 15]) + ctx->wbuf[(i + 9) & 15] + \
                g512_0(ctx->wbuf[(i + 1) & 15])

#define h5_cycle(i, j) \
        v[(7 - i) & 7] += \
                (j ? h5(i) : ctx->wbuf[i & 15]) + k512[i + j] + \
                s512_1(v[(4 - i) & 7]) + \
                ch(v[(4 - i) & 7], v[(5 - i) & 7], v[(6 - i) & 7]); \
        v[(3 - i) & 7] += v[(7 - i) & 7]; \
        v[(7 - i) & 7] += s512_0(v[(0 - i) & 7]) + \
                maj(v[(0 - i) & 7], v[(1 - i) & 7], v[(2 - i) & 7])

/* SHA384/SHA512 mixing data */

const sha2_64t k512[80] = {
        n_u64(428a2f98d728ae22), n_u64(7137449123ef65cd), n_u64(b5c0fbcfec4d3b2f),
        n_u64(e9b5dba58189dbbc), n_u64(3956c25bf348b538), n_u64(59f111f1b605d019),
        n_u64(923f82a4af194f9b), n_u64(ab1c5ed5da6d8118), n_u64(d807aa98a3030242),
        n_u64(12835b0145706fbe), n_u64(243185be4ee4b28c), n_u64(550c7dc3d5ffb4e2),
        n_u64(72be5d74f27b896f), n_u64(80deb1fe3b1696b1), n_u64(9bdc06a725c71235),
        n_u64(c19bf174cf692694), n_u64(e49b69c19ef14ad2), n_u64(efbe4786384f25e3),
        n_u64(0fc19dc68b8cd5b5), n_u64(240ca1cc77ac9c65), n_u64(2de92c6f592b0275),
        n_u64(4a7484aa6ea6e483), n_u64(5cb0a9dcbd41fbd4), n_u64(76f988da831153b5),
        n_u64(983e5152ee66dfab), n_u64(a831c66d2db43210), n_u64(b00327c898fb213f),
        n_u64(bf597fc7beef0ee4), n_u64(c6e00bf33da88fc2), n_u64(d5a79147930aa725),
        n_u64(06ca6351e003826f), n_u64(142929670a0e6e70), n_u64(27b70a8546d22ffc),
        n_u64(2e1b21385c26c926), n_u64(4d2c6dfc5ac42aed), n_u64(53380d139d95b3df),
        n_u64(650a73548baf63de), n_u64(766a0abb3c77b2a8), n_u64(81c2c92e47edaee6),
        n_u64(92722c851482353b), n_u64(a2bfe8a14cf10364), n_u64(a81a664bbc423001),
        n_u64(c24b8b70d0f89791), n_u64(c76c51a30654be30), n_u64(d192e819d6ef5218),
        n_u64(d69906245565a910), n_u64(f40e35855771202a), n_u64(106aa07032bbd1b8),
        n_u64(19a4c116b8d2d0c8), n_u64(1e376c085141ab53), n_u64(2748774cdf8eeb99),
        n_u64(34b0bcb5e19b48a8), n_u64(391c0cb3c5c95a63), n_u64(4ed8aa4ae3418acb),
        n_u64(5b9cca4f7763e373), n_u64(682e6ff3d6b2b8a3), n_u64(748f82ee5defb2fc),
        n_u64(78a5636f43172f60), n_u64(84c87814a1f0ab72), n_u64(8cc702081a6439ec),
        n_u64(90befffa23631e28), n_u64(a4506cebde82bde9), n_u64(bef9a3f7b2c67915),
        n_u64(c67178f2e372532b), n_u64(ca273eceea26619c), n_u64(d186b8c721c0c207),
        n_u64(eada7dd6cde0eb1e), n_u64(f57d4f7fee6ed178), n_u64(06f067aa72176fba),
        n_u64(0a637dc5a2c898a6), n_u64(113f9804bef90dae), n_u64(1b710b35131c471b),
        n_u64(28db77f523047d84), n_u64(32caab7b40c72493), n_u64(3c9ebe0a15c9bebc),
        n_u64(431d67c49c100d4c), n_u64(4cc5d4becb3e42b6), n_u64(597f299cfc657e2a),
        n_u64(5fcb6fab3ad6faec), n_u64(6c44198c4a475817)
};

/* Compile 128 bytes of hash data into SHA384/SHA512 digest value */
/* NOTE: this routine assumes that the byte order in the          */
/* ctx->wbuf[] at this point is in such an order that low         */
/* address bytes in the ORIGINAL byte stream placed in this       */
/* buffer will now go to the high end of words on BOTH big        */
/* and little endian systems                                      */

void sha512_compile(sha512_ctx ctx[1])
{
        sha2_64t v[8];
        sha2_32t j;

        memcpy(v, ctx->hash, 8 * sizeof(sha2_64t));

        for (j = 0; j < 80; j += 16) {
                h5_cycle(0, j);
                h5_cycle(1, j);
                h5_cycle(2, j);
                h5_cycle(3, j);
                h5_cycle(4, j);
                h5_cycle(5, j);
                h5_cycle(6, j);
                h5_cycle(7, j);
                h5_cycle(8, j);
                h5_cycle(9, j);
                h5_cycle(10, j);
                h5_cycle(11, j);
                h5_cycle(12, j);
                h5_cycle(13, j);
                h5_cycle(14, j);
                h5_cycle(15, j);
        }

        ctx->hash[0] += v[0];
        ctx->hash[1] += v[1];
        ctx->hash[2] += v[2];
        ctx->hash[3] += v[3];
        ctx->hash[4] += v[4];
        ctx->hash[5] += v[5];
        ctx->hash[6] += v[6];
        ctx->hash[7] += v[7];
}

/* SHA384/SHA512 hash data in an array of bytes into hash buffer */
/* and call the hash_compile function as required.               */

void sha512_hash(sha512_ctx ctx[1], const unsigned char data[],
                 unsigned long len)
{
        sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA512_MASK),
                 space = SHA512_BLOCK_SIZE - pos;
        const unsigned char *sp = data;

        if ((ctx->count[0] += len) < len)
                ++(ctx->count[1]);

        while (len >= space) { /* transfer whole blocks while possible */
                memcpy(((unsigned char *)ctx->wbuf) + pos, sp, space);
                sp += space;
                len -= space;
                space = SHA512_BLOCK_SIZE;
                pos = 0;
                bsw_64(ctx->wbuf, SHA512_BLOCK_SIZE >> 3);
                sha512_compile(ctx);
        }

        memcpy(((unsigned char *)ctx->wbuf) + pos, sp, len);
}

/* SHA384/512 Final padding and digest calculation */

static sha2_64t m2[8] = { n_u64(0000000000000000), n_u64(ff00000000000000),
                          n_u64(ffff000000000000), n_u64(ffffff0000000000),
                          n_u64(ffffffff00000000), n_u64(ffffffffff000000),
                          n_u64(ffffffffffff0000), n_u64(ffffffffffffff00) };

static sha2_64t b2[8] = { n_u64(8000000000000000), n_u64(0080000000000000),
                          n_u64(0000800000000000), n_u64(0000008000000000),
                          n_u64(0000000080000000), n_u64(0000000000800000),
                          n_u64(0000000000008000), n_u64(0000000000000080) };

static void sha_end(sha512_ctx ctx[1], unsigned char hval[],
                    const unsigned int hlen)
{
        sha2_32t i = (sha2_32t)(ctx->count[0] & SHA512_MASK);

        bsw_64(ctx->wbuf, (i + 7) >> 3);

        /* bytes in the buffer are now in an order in which references  */
        /* to 64-bit words will put bytes with lower addresses into the */
        /* top of 64 bit words on BOTH big and little endian machines   */

        /* we now need to mask valid bytes and add the padding which is */
        /* a single 1 bit and as many zero bits as necessary.           */
        ctx->wbuf[i >> 3] = (ctx->wbuf[i >> 3] & m2[i & 7]) | b2[i & 7];

        /* we need 17 or more empty byte positions, one for the padding */
        /* byte (above) and sixteen for the length count. If there is   */
        /* not enough space pad and empty the buffer                    */
        if (i > SHA512_BLOCK_SIZE - 17) {
                if (i < 120)
                        ctx->wbuf[15] = 0;
                sha512_compile(ctx);
                i = 0;
        } else
                i = (i >> 3) + 1;

        while (i < 14)
                ctx->wbuf[i++] = 0;

        /* the following 64-bit length fields are assembled in the */
        /* wrong byte order on little endian machines but this is  */
        /* corrected later since they are only ever used as 64-bit */
        /* word values.                                            */

        ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 61);
        ctx->wbuf[15] = ctx->count[0] << 3;

        sha512_compile(ctx);

        /* extract the hash value as bytes in case the hash buffer is */
        /* misaligned for 32-bit words                                */
        for (i = 0; i < hlen; ++i)
                hval[i] = (unsigned char)(ctx->hash[i >> 3] >> 8 * (~i & 7));
}

#endif

#if defined(SHA_2) || defined(SHA_384)

/* SHA384 initialisation data */

const sha2_64t i384[80] = { n_u64(cbbb9d5dc1059ed8), n_u64(629a292a367cd507),
                            n_u64(9159015a3070dd17), n_u64(152fecd8f70e5939),
                            n_u64(67332667ffc00b31), n_u64(8eb44a8768581511),
                            n_u64(db0c2e0d64f98fa7), n_u64(47b5481dbefa4fa4) };

void sha384_begin(sha384_ctx ctx[1])
{
        ctx->count[0] = ctx->count[1] = 0;
        memcpy(ctx->hash, i384, 8 * sizeof(sha2_64t));
}

void sha384_end(sha384_ctx ctx[1], unsigned char hval[])
{
        sha_end(ctx, hval, SHA384_DIGEST_SIZE);
}

void sha384(unsigned char hval[], const unsigned char data[],
            unsigned long len)
{
        sha384_ctx cx[1];

        sha384_begin(cx);
        sha384_hash(cx, data, len);
        sha384_end(cx, hval);
}

#endif

#if defined(SHA_2) || defined(SHA_512)

/* SHA512 initialisation data */

const sha2_64t i512[80] = { n_u64(6a09e667f3bcc908), n_u64(bb67ae8584caa73b),
                            n_u64(3c6ef372fe94f82b), n_u64(a54ff53a5f1d36f1),
                            n_u64(510e527fade682d1), n_u64(9b05688c2b3e6c1f),
                            n_u64(1f83d9abfb41bd6b), n_u64(5be0cd19137e2179) };

void sha512_begin(sha512_ctx ctx[1])
{
        ctx->count[0] = ctx->count[1] = 0;
        memcpy(ctx->hash, i512, 8 * sizeof(sha2_64t));
}

void sha512_end(sha512_ctx ctx[1], unsigned char hval[])
{
        sha_end(ctx, hval, SHA512_DIGEST_SIZE);
}

void sha512(unsigned char hval[], const unsigned char data[],
            unsigned long len)
{
        sha512_ctx cx[1];

        sha512_begin(cx);
        sha512_hash(cx, data, len);
        sha512_end(cx, hval);
}

#endif

#if defined(SHA_2)

#define CTX_256(x) ((x)->uu->ctx256)
#define CTX_384(x) ((x)->uu->ctx512)
#define CTX_512(x) ((x)->uu->ctx512)

/* SHA2 initialisation */

int sha2_begin(sha2_ctx ctx[1], unsigned long len)
{
        unsigned long l = len;

        switch (len) {
        case 256:
                l = len >> 3;
        case 32:
                CTX_256(ctx)->count[0] = CTX_256(ctx)->count[1] = 0;
                memcpy(CTX_256(ctx)->hash, i256, 32);
                break;
        case 384:
                l = len >> 3;
        case 48:
                CTX_384(ctx)->count[0] = CTX_384(ctx)->count[1] = 0;
                memcpy(CTX_384(ctx)->hash, i384, 64);
                break;
        case 512:
                l = len >> 3;
        case 64:
                CTX_512(ctx)->count[0] = CTX_512(ctx)->count[1] = 0;
                memcpy(CTX_512(ctx)->hash, i512, 64);
                break;
        default:
                return SHA2_BAD;
        }

        ctx->sha2_len = l;
        return SHA2_GOOD;
}
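
/* Note (added for clarity): sha2_begin accepts the digest length either */
/* in bits (256, 384 or 512) or in bytes (32, 48 or 64); in both cases   */
/* ctx->sha2_len is left holding the length in bytes, which sha2_hash    */
/* and sha2_end then use to select the underlying hash.                  */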

void sha2_hash(sha2_ctx ctx[1], const unsigned char data[], unsigned long len)
{
        switch (ctx->sha2_len) {
        case 32:
                sha256_hash(CTX_256(ctx), data, len);
                return;
        case 48:
                sha384_hash(CTX_384(ctx), data, len);
                return;
        case 64:
                sha512_hash(CTX_512(ctx), data, len);
                return;
        }
}

void sha2_end(sha2_ctx ctx[1], unsigned char hval[])
{
        switch (ctx->sha2_len) {
        case 32:
                sha256_end(CTX_256(ctx), hval);
                return;
        case 48:
                sha_end(CTX_384(ctx), hval, SHA384_DIGEST_SIZE);
                return;
        case 64:
                sha_end(CTX_512(ctx), hval, SHA512_DIGEST_SIZE);
                return;
        }
}

int sha2(unsigned char hval[], unsigned long size, const unsigned char data[],
         unsigned long len)
{
        sha2_ctx cx[1];

        if (sha2_begin(cx, size) == SHA2_GOOD) {
                sha2_hash(cx, data, len);
                sha2_end(cx, hval);
                return SHA2_GOOD;
        } else
                return SHA2_BAD;
}
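
/* Usage sketch (an illustration added for clarity; 'msg' and 'msg_len'
   stand for a caller-supplied byte buffer and its length):

       unsigned char digest[SHA512_DIGEST_SIZE];

       if (sha2(digest, 64, msg, msg_len) == SHA2_GOOD)
               ...   (digest now holds the 64-byte SHA512 value)

   Passing 32 or 48 (or 256/384 as bit lengths) selects SHA256 or SHA384
   and fills the correspondingly shorter digest instead.
*/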

#endif