xref: /rk3399_rockchip-uboot/tools/rockchip/sha2.c (revision 23ba6841ccdaeb51290dc49d4e32f175bd3baa34)
/*
 ---------------------------------------------------------------------------
 Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
 All rights reserved.

 LICENSE TERMS

 The free distribution and use of this software in both source and binary
 form is allowed (with or without changes) provided that:

   1. distributions of this source code include the above copyright
      notice, this list of conditions and the following disclaimer;

   2. distributions in binary form include the above copyright
      notice, this list of conditions and the following disclaimer
      in the documentation and/or other associated materials;

   3. the copyright holder's name is not used to endorse products
      built using this software without specific written permission.

 ALTERNATIVELY, provided that this notice is retained in full, this product
 may be distributed under the terms of the GNU General Public License (GPL),
 in which case the provisions of the GPL apply INSTEAD OF those given above.

 DISCLAIMER

 This software is provided 'as is' with no explicit or implied warranties
 in respect of its properties, including, but not limited to, correctness
 and/or fitness for purpose.
 ---------------------------------------------------------------------------
 Issue Date: 30/11/2002

 This is a byte oriented version of SHA2 that operates on arrays of bytes
 stored in memory. This code implements sha256, sha384 and sha512, but the
 latter two functions rely on 64-bit integer operations that may not be
 very efficient on 32-bit machines.

 The sha256 functions use a type 'sha256_ctx' to hold details of the
 current hash state and use the following three calls:

       void sha256_begin(sha256_ctx ctx[1])
       void sha256_hash(sha256_ctx ctx[1], const unsigned char data[],
                            unsigned long len)
       void sha256_end(sha256_ctx ctx[1], unsigned char hval[])

 The first subroutine initialises a hash computation by setting up the
 sha256_ctx context. The second subroutine hashes 8-bit bytes from the
 array data[] into the hash state within the sha256_ctx context, the
 number of bytes to be hashed being given by the unsigned long integer
 len. The third subroutine completes the hash calculation and places the
 resulting digest value in the array of 8-bit bytes hval[]. (A short
 usage sketch appears just after the include of sha2.h below.)

 The sha384 and sha512 functions are similar and use the interfaces:

       void sha384_begin(sha384_ctx ctx[1]);
       void sha384_hash(sha384_ctx ctx[1], const unsigned char data[],
                            unsigned long len);
       void sha384_end(sha384_ctx ctx[1], unsigned char hval[]);

       void sha512_begin(sha512_ctx ctx[1]);
       void sha512_hash(sha512_ctx ctx[1], const unsigned char data[],
                            unsigned long len);
       void sha512_end(sha512_ctx ctx[1], unsigned char hval[]);

 In addition there is a sha2 interface that can drive any of these hash
 functions through a single set of calls, selected by a hash length
 parameter:

       int sha2_begin(sha2_ctx ctx[1], unsigned long len);
       void sha2_hash(sha2_ctx ctx[1], const unsigned char data[],
                            unsigned long len);
       void sha2_end(sha2_ctx ctx[1], unsigned char hval[]);

 My thanks to Erik Andersen <andersen@codepoet.org> for testing this code
 on big-endian systems and for his assistance with corrections.
*/

/* define the hash functions that you need          */

#define SHA_2 /* for dynamic hash length  */
#define SHA_256
#define SHA_384
#define SHA_512

#ifdef USE_HOSTCC
#include <string.h> /* for memcpy() etc.        */
#include <stdlib.h> /* for _lrotr with VC++     */
#endif

#include "sha2.h"
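
/*
 * A minimal usage sketch for the incremental sha256 interface described in
 * the header comment above (illustrative only, compiled out by default).
 * SHA2_USAGE_EXAMPLE is a hypothetical guard macro, not part of the original
 * sources; everything else used here comes from sha2.h.
 */
#ifdef SHA2_USAGE_EXAMPLE
static void sha256_digest_of_two_parts(const unsigned char *part1,
                                       unsigned long len1,
                                       const unsigned char *part2,
                                       unsigned long len2,
                                       unsigned char digest[SHA256_DIGEST_SIZE])
{
	sha256_ctx cx[1];

	sha256_begin(cx);             /* initialise the hash state            */
	sha256_hash(cx, part1, len1); /* data may be fed in any number of     */
	sha256_hash(cx, part2, len2); /* pieces, each of any length           */
	sha256_end(cx, digest);       /* pad, finish and write out the digest */
}
#endif /* SHA2_USAGE_EXAMPLE */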

/* rockchip crypto byte order */
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN

/*  1. PLATFORM SPECIFIC INCLUDES */

// #if defined(__GNU_LIBRARY__)
// #  include <byteswap.h>
// #  include <endian.h>
// #elif defined(__CRYPTLIB__)
// #  if defined( INC_ALL )
// #    include "crypt.h"
// #  elif defined( INC_CHILD )
// #    include "../crypt.h"
// #  else
// #    include "crypt.h"
// #  endif
// #  if defined(DATA_LITTLEENDIAN)
// #    define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
// #  else
// #    define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
// #  endif
// #if defined(_MSC_VER)
// #  include <stdlib.h>
// #elif !defined(WIN32)
// #  include <stdlib.h>
// #  if !defined (_ENDIAN_H)
// #    include <sys/param.h>
// #  else
// #    include _ENDIAN_H
// #  endif
// #endif

/*  2. BYTE ORDER IN 32-BIT WORDS

    To obtain the highest speed on processors with 32-bit words, this code
    needs to determine the order in which bytes are packed into such words.
    The following block of code is an attempt to capture the most obvious
    ways in which various environments specify their endian definitions.
    It may well fail, in which case the definitions will need to be set by
    editing at the points marked **** EDIT HERE IF NECESSARY **** below.
*/
#define SHA_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */
#define SHA_BIG_ENDIAN 4321    /* byte 0 is most significant (mc68k) */

#if !defined(PLATFORM_BYTE_ORDER)
#if defined(LITTLE_ENDIAN) || defined(BIG_ENDIAN)
#if defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
#if defined(BYTE_ORDER)
#if (BYTE_ORDER == LITTLE_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif(BYTE_ORDER == BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#endif
#elif defined(LITTLE_ENDIAN) && !defined(BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif !defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#elif defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)
#if defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
#if defined(_BYTE_ORDER)
#if (_BYTE_ORDER == _LITTLE_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif(_BYTE_ORDER == _BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#endif
#elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif !defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#elif 0 /* **** EDIT HERE IF NECESSARY **** */
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif 0 /* **** EDIT HERE IF NECESSARY **** */
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#elif(('1234' >> 24) == '1')
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif(('4321' >> 24) == '1')
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#endif

#if !defined(PLATFORM_BYTE_ORDER)
#error Please set undetermined byte order (see the "EDIT HERE IF NECESSARY" markers above).
#endif

#ifdef _MSC_VER
#pragma intrinsic(memcpy)
#endif

#define rotr32(x, n) (((x) >> n) | ((x) << (32 - n)))
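/* note: as is usual for C rotate macros, rotr32(x, n) is only well defined */
/* for 0 < n < 32 (a shift by 32 or more is undefined behaviour); every use */
/* below satisfies this, and the same caveat applies to rotr64() further on */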

#if !defined(bswap_32)
#define bswap_32(x)                                                            \
  ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00))
#endif

#if (PLATFORM_BYTE_ORDER == SHA_LITTLE_ENDIAN)
#define SWAP_BYTES
#else
#undef SWAP_BYTES
#endif
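
/* Because PLATFORM_BYTE_ORDER is forced to SHA_BIG_ENDIAN above to match   */
/* the rockchip crypto byte order, SWAP_BYTES is left undefined here and    */
/* the bsw_32()/bsw_64() macros below compile to nothing: message words are */
/* taken in the host's native byte order, without any byte swapping.        */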

#if defined(SHA_2) || defined(SHA_256)

#define SHA256_MASK (SHA256_BLOCK_SIZE - 1)

#if defined(SWAP_BYTES)
#define bsw_32(p, n)                                                           \
  {                                                                            \
    int _i = (n);                                                              \
    while (_i--)                                                               \
      p[_i] = bswap_32(p[_i]);                                                 \
  }
#else
#define bsw_32(p, n)
#endif

/* SHA256 mixing function definitions   */

#define ch(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

#define s256_0(x) (rotr32((x), 2) ^ rotr32((x), 13) ^ rotr32((x), 22))
#define s256_1(x) (rotr32((x), 6) ^ rotr32((x), 11) ^ rotr32((x), 25))
#define g256_0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3))
#define g256_1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10))

/* rotated SHA256 round definition. Rather than swapping variables as in    */
/* FIPS-180, different variables are 'rotated' on each round, returning     */
/* to their starting positions every eight rounds                           */
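/* Concretely: in round i the working variables that FIPS-180 calls         */
/* a, b, ..., h live in v[(0 - i) & 7], v[(1 - i) & 7], ..., v[(7 - i) & 7]. */
/* The slot playing 'h' already holds h, so the first addition leaves it    */
/* holding T1 and the second T1 + T2, which is exactly next round's 'a'.    */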

#define h2(i)                                                                  \
  ctx->wbuf[i & 15] +=                                                         \
      g256_1(ctx->wbuf[(i + 14) & 15]) + ctx->wbuf[(i + 9) & 15] +             \
      g256_0(ctx->wbuf[(i + 1) & 15])

#define h2_cycle(i, j)                                                         \
  v[(7 - i) & 7] +=                                                            \
      (j ? h2(i) : ctx->wbuf[i & 15]) + k256[i + j] + s256_1(v[(4 - i) & 7]) + \
      ch(v[(4 - i) & 7], v[(5 - i) & 7], v[(6 - i) & 7]);                      \
  v[(3 - i) & 7] += v[(7 - i) & 7];                                            \
  v[(7 - i) & 7] += s256_0(v[(0 - i) & 7]) +                                   \
                    maj(v[(0 - i) & 7], v[(1 - i) & 7], v[(2 - i) & 7])

/* SHA256 mixing data   */

const sha2_32t k256[64] = {
	n_u32(428a2f98), n_u32(71374491), n_u32(b5c0fbcf), n_u32(e9b5dba5),
	n_u32(3956c25b), n_u32(59f111f1), n_u32(923f82a4), n_u32(ab1c5ed5),
	n_u32(d807aa98), n_u32(12835b01), n_u32(243185be), n_u32(550c7dc3),
	n_u32(72be5d74), n_u32(80deb1fe), n_u32(9bdc06a7), n_u32(c19bf174),
	n_u32(e49b69c1), n_u32(efbe4786), n_u32(0fc19dc6), n_u32(240ca1cc),
	n_u32(2de92c6f), n_u32(4a7484aa), n_u32(5cb0a9dc), n_u32(76f988da),
	n_u32(983e5152), n_u32(a831c66d), n_u32(b00327c8), n_u32(bf597fc7),
	n_u32(c6e00bf3), n_u32(d5a79147), n_u32(06ca6351), n_u32(14292967),
	n_u32(27b70a85), n_u32(2e1b2138), n_u32(4d2c6dfc), n_u32(53380d13),
	n_u32(650a7354), n_u32(766a0abb), n_u32(81c2c92e), n_u32(92722c85),
	n_u32(a2bfe8a1), n_u32(a81a664b), n_u32(c24b8b70), n_u32(c76c51a3),
	n_u32(d192e819), n_u32(d6990624), n_u32(f40e3585), n_u32(106aa070),
	n_u32(19a4c116), n_u32(1e376c08), n_u32(2748774c), n_u32(34b0bcb5),
	n_u32(391c0cb3), n_u32(4ed8aa4a), n_u32(5b9cca4f), n_u32(682e6ff3),
	n_u32(748f82ee), n_u32(78a5636f), n_u32(84c87814), n_u32(8cc70208),
	n_u32(90befffa), n_u32(a4506ceb), n_u32(bef9a3f7), n_u32(c67178f2),
};

/* SHA256 initialisation data */

const sha2_32t i256[8] = { n_u32(6a09e667), n_u32(bb67ae85), n_u32(3c6ef372),
                           n_u32(a54ff53a), n_u32(510e527f), n_u32(9b05688c),
                           n_u32(1f83d9ab), n_u32(5be0cd19)
                         };

void sha256_begin(sha256_ctx ctx[1])
{
	ctx->count[0] = ctx->count[1] = 0;
	memcpy(ctx->hash, i256, 8 * sizeof(sha2_32t));
}

/* Compile 64 bytes of hash data into SHA256 digest value   */
/* NOTE: this routine assumes that the byte order in the    */
/* ctx->wbuf[] at this point is in such an order that low   */
/* address bytes in the ORIGINAL byte stream placed in this */
/* buffer will now go to the high end of words on BOTH big  */
/* and little endian systems                                */

void sha256_compile(sha256_ctx ctx[1])
{
	sha2_32t v[8], j;

	memcpy(v, ctx->hash, 8 * sizeof(sha2_32t));

	for (j = 0; j < 64; j += 16) {
		h2_cycle(0, j);
		h2_cycle(1, j);
		h2_cycle(2, j);
		h2_cycle(3, j);
		h2_cycle(4, j);
		h2_cycle(5, j);
		h2_cycle(6, j);
		h2_cycle(7, j);
		h2_cycle(8, j);
		h2_cycle(9, j);
		h2_cycle(10, j);
		h2_cycle(11, j);
		h2_cycle(12, j);
		h2_cycle(13, j);
		h2_cycle(14, j);
		h2_cycle(15, j);
	}

	ctx->hash[0] += v[0];
	ctx->hash[1] += v[1];
	ctx->hash[2] += v[2];
	ctx->hash[3] += v[3];
	ctx->hash[4] += v[4];
	ctx->hash[5] += v[5];
	ctx->hash[6] += v[6];
	ctx->hash[7] += v[7];
}

/* SHA256 hash data in an array of bytes into hash buffer   */
/* and call the hash_compile function as required.          */

void sha256_hash(sha256_ctx ctx[1], const unsigned char data[],
                 unsigned long len)
{
	sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA256_MASK),
	         space = SHA256_BLOCK_SIZE - pos;
	const unsigned char *sp = data;

	if ((ctx->count[0] += len) < len)
		++(ctx->count[1]);

	while (len >= space) { /* transfer whole blocks while possible */
		memcpy(((unsigned char *)ctx->wbuf) + pos, sp, space);
		sp += space;
		len -= space;
		space = SHA256_BLOCK_SIZE;
		pos = 0;
		bsw_32(ctx->wbuf, SHA256_BLOCK_SIZE >> 2);
		sha256_compile(ctx);
	}

	memcpy(((unsigned char *)ctx->wbuf) + pos, sp, len);
}

/* SHA256 Final padding and digest calculation  */

static sha2_32t m1[4] = { n_u32(00000000), n_u32(ff000000), n_u32(ffff0000),
                          n_u32(ffffff00)
                        };

static sha2_32t b1[4] = { n_u32(80000000), n_u32(00800000), n_u32(00008000),
                          n_u32(00000080)
                        };
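
/* m1[i & 3] keeps the i & 3 message bytes already present in the final     */
/* partial word (at this point they occupy the high end of the word), and   */
/* b1[i & 3] places the single 0x80 padding byte immediately after them     */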

void sha256_end(sha256_ctx ctx[1], unsigned char hval[])
{
	sha2_32t i = (sha2_32t)(ctx->count[0] & SHA256_MASK);

	bsw_32(ctx->wbuf, (i + 3) >> 2)
	/* bytes in the buffer are now in an order in which references  */
	/* to 32-bit words will put bytes with lower addresses into the */
	/* top of 32 bit words on BOTH big and little endian machines   */

	/* we now need to mask valid bytes and add the padding which is */
	/* a single 1 bit and as many zero bits as necessary.           */
	ctx->wbuf[i >> 2] = (ctx->wbuf[i >> 2] & m1[i & 3]) | b1[i & 3];

	/* we need 9 or more empty positions, one for the padding byte  */
	/* (above) and eight for the length count.  If there is not     */
	/* enough space pad and empty the buffer                        */
	if (i > SHA256_BLOCK_SIZE - 9) {
		if (i < 60)
			ctx->wbuf[15] = 0;
		sha256_compile(ctx);
		i = 0;
	} else /* compute a word index for the empty buffer positions  */
		i = (i >> 2) + 1;

	while (i < 14) /* and zero pad all but last two positions      */
		ctx->wbuf[i++] = 0;

	/* the following 32-bit length fields are assembled in the      */
	/* wrong byte order on little endian machines but this is       */
	/* corrected later since they are only ever used as 32-bit      */
	/* word values.                                                 */

	ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 29);
	ctx->wbuf[15] = ctx->count[0] << 3;

	sha256_compile(ctx);

	/* extract the hash value as bytes in case the hash buffer is   */
	/* misaligned for 32-bit words                                  */
	for (i = 0; i < SHA256_DIGEST_SIZE; ++i)
		hval[i] = (unsigned char)(ctx->hash[i >> 2] >> 8 * (~i & 3));
}

void sha256(unsigned char hval[], const unsigned char data[],
            unsigned long len)
{
	sha256_ctx cx[1];

	sha256_begin(cx);
	sha256_hash(cx, data, len);
	sha256_end(cx, hval);
}

#endif

#if defined(SHA_2) || defined(SHA_384) || defined(SHA_512)

#define SHA512_MASK (SHA512_BLOCK_SIZE - 1)

#define rotr64(x, n) (((x) >> n) | ((x) << (64 - n)))

#if !defined(bswap_64)
#define bswap_64(x)                                                            \
  (((sha2_64t)(bswap_32((sha2_32t)(x)))) << 32 |                               \
   bswap_32((sha2_32t)((x) >> 32)))
#endif

#if defined(SWAP_BYTES)
#define bsw_64(p, n)                                                           \
  {                                                                            \
    int _i = (n);                                                              \
    while (_i--)                                                               \
      p[_i] = bswap_64(p[_i]);                                                 \
  }
#else
#define bsw_64(p, n)
#endif

/* SHA512 mixing function definitions   */

#define s512_0(x) (rotr64((x), 28) ^ rotr64((x), 34) ^ rotr64((x), 39))
#define s512_1(x) (rotr64((x), 14) ^ rotr64((x), 18) ^ rotr64((x), 41))
#define g512_0(x) (rotr64((x), 1) ^ rotr64((x), 8) ^ ((x) >> 7))
#define g512_1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6))

/* rotated SHA512 round definition. Rather than swapping variables as in    */
/* FIPS-180, different variables are 'rotated' on each round, returning     */
/* to their starting positions every eight rounds                           */

#define h5(i)                                                                  \
  ctx->wbuf[i & 15] +=                                                         \
      g512_1(ctx->wbuf[(i + 14) & 15]) + ctx->wbuf[(i + 9) & 15] +             \
      g512_0(ctx->wbuf[(i + 1) & 15])

#define h5_cycle(i, j)                                                         \
  v[(7 - i) & 7] +=                                                            \
      (j ? h5(i) : ctx->wbuf[i & 15]) + k512[i + j] + s512_1(v[(4 - i) & 7]) + \
      ch(v[(4 - i) & 7], v[(5 - i) & 7], v[(6 - i) & 7]);                      \
  v[(3 - i) & 7] += v[(7 - i) & 7];                                            \
  v[(7 - i) & 7] += s512_0(v[(0 - i) & 7]) +                                   \
                    maj(v[(0 - i) & 7], v[(1 - i) & 7], v[(2 - i) & 7])

/* SHA384/SHA512 mixing data    */

const sha2_64t k512[80] = {
	n_u64(428a2f98d728ae22), n_u64(7137449123ef65cd), n_u64(b5c0fbcfec4d3b2f),
	n_u64(e9b5dba58189dbbc), n_u64(3956c25bf348b538), n_u64(59f111f1b605d019),
	n_u64(923f82a4af194f9b), n_u64(ab1c5ed5da6d8118), n_u64(d807aa98a3030242),
	n_u64(12835b0145706fbe), n_u64(243185be4ee4b28c), n_u64(550c7dc3d5ffb4e2),
	n_u64(72be5d74f27b896f), n_u64(80deb1fe3b1696b1), n_u64(9bdc06a725c71235),
	n_u64(c19bf174cf692694), n_u64(e49b69c19ef14ad2), n_u64(efbe4786384f25e3),
	n_u64(0fc19dc68b8cd5b5), n_u64(240ca1cc77ac9c65), n_u64(2de92c6f592b0275),
	n_u64(4a7484aa6ea6e483), n_u64(5cb0a9dcbd41fbd4), n_u64(76f988da831153b5),
	n_u64(983e5152ee66dfab), n_u64(a831c66d2db43210), n_u64(b00327c898fb213f),
	n_u64(bf597fc7beef0ee4), n_u64(c6e00bf33da88fc2), n_u64(d5a79147930aa725),
	n_u64(06ca6351e003826f), n_u64(142929670a0e6e70), n_u64(27b70a8546d22ffc),
	n_u64(2e1b21385c26c926), n_u64(4d2c6dfc5ac42aed), n_u64(53380d139d95b3df),
	n_u64(650a73548baf63de), n_u64(766a0abb3c77b2a8), n_u64(81c2c92e47edaee6),
	n_u64(92722c851482353b), n_u64(a2bfe8a14cf10364), n_u64(a81a664bbc423001),
	n_u64(c24b8b70d0f89791), n_u64(c76c51a30654be30), n_u64(d192e819d6ef5218),
	n_u64(d69906245565a910), n_u64(f40e35855771202a), n_u64(106aa07032bbd1b8),
	n_u64(19a4c116b8d2d0c8), n_u64(1e376c085141ab53), n_u64(2748774cdf8eeb99),
	n_u64(34b0bcb5e19b48a8), n_u64(391c0cb3c5c95a63), n_u64(4ed8aa4ae3418acb),
	n_u64(5b9cca4f7763e373), n_u64(682e6ff3d6b2b8a3), n_u64(748f82ee5defb2fc),
	n_u64(78a5636f43172f60), n_u64(84c87814a1f0ab72), n_u64(8cc702081a6439ec),
	n_u64(90befffa23631e28), n_u64(a4506cebde82bde9), n_u64(bef9a3f7b2c67915),
	n_u64(c67178f2e372532b), n_u64(ca273eceea26619c), n_u64(d186b8c721c0c207),
	n_u64(eada7dd6cde0eb1e), n_u64(f57d4f7fee6ed178), n_u64(06f067aa72176fba),
	n_u64(0a637dc5a2c898a6), n_u64(113f9804bef90dae), n_u64(1b710b35131c471b),
	n_u64(28db77f523047d84), n_u64(32caab7b40c72493), n_u64(3c9ebe0a15c9bebc),
	n_u64(431d67c49c100d4c), n_u64(4cc5d4becb3e42b6), n_u64(597f299cfc657e2a),
	n_u64(5fcb6fab3ad6faec), n_u64(6c44198c4a475817)
};

/* Compile 128 bytes of hash data into SHA384/SHA512 digest value */
/* NOTE: this routine assumes that the byte order in the          */
/* ctx->wbuf[] at this point is in such an order that low         */
/* address bytes in the ORIGINAL byte stream placed in this       */
/* buffer will now go to the high end of words on BOTH big        */
/* and little endian systems                                      */

void sha512_compile(sha512_ctx ctx[1])
{
	sha2_64t v[8];
	sha2_32t j;

	memcpy(v, ctx->hash, 8 * sizeof(sha2_64t));

	for (j = 0; j < 80; j += 16) {
		h5_cycle(0, j);
		h5_cycle(1, j);
		h5_cycle(2, j);
		h5_cycle(3, j);
		h5_cycle(4, j);
		h5_cycle(5, j);
		h5_cycle(6, j);
		h5_cycle(7, j);
		h5_cycle(8, j);
		h5_cycle(9, j);
		h5_cycle(10, j);
		h5_cycle(11, j);
		h5_cycle(12, j);
		h5_cycle(13, j);
		h5_cycle(14, j);
		h5_cycle(15, j);
	}

	ctx->hash[0] += v[0];
	ctx->hash[1] += v[1];
	ctx->hash[2] += v[2];
	ctx->hash[3] += v[3];
	ctx->hash[4] += v[4];
	ctx->hash[5] += v[5];
	ctx->hash[6] += v[6];
	ctx->hash[7] += v[7];
}

/* SHA384/SHA512 hash data in an array of bytes into hash buffer */
/* and call the hash_compile function as required.               */

void sha512_hash(sha512_ctx ctx[1], const unsigned char data[],
                 unsigned long len)
{
	sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA512_MASK),
	         space = SHA512_BLOCK_SIZE - pos;
	const unsigned char *sp = data;

	if ((ctx->count[0] += len) < len)
		++(ctx->count[1]);

	while (len >= space) { /* transfer whole blocks while possible */
		memcpy(((unsigned char *)ctx->wbuf) + pos, sp, space);
		sp += space;
		len -= space;
		space = SHA512_BLOCK_SIZE;
		pos = 0;
		bsw_64(ctx->wbuf, SHA512_BLOCK_SIZE >> 3);
		sha512_compile(ctx);
	}

	memcpy(((unsigned char *)ctx->wbuf) + pos, sp, len);
}

/* SHA384/512 Final padding and digest calculation  */

static sha2_64t m2[8] = { n_u64(0000000000000000), n_u64(ff00000000000000),
                          n_u64(ffff000000000000), n_u64(ffffff0000000000),
                          n_u64(ffffffff00000000), n_u64(ffffffffff000000),
                          n_u64(ffffffffffff0000), n_u64(ffffffffffffff00)
                        };

static sha2_64t b2[8] = { n_u64(8000000000000000), n_u64(0080000000000000),
                          n_u64(0000800000000000), n_u64(0000008000000000),
                          n_u64(0000000080000000), n_u64(0000000000800000),
                          n_u64(0000000000008000), n_u64(0000000000000080)
                        };
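
/* m2[] and b2[] play the same role for 64-bit words that m1[] and b1[]     */
/* play for 32-bit words above: m2[i & 7] keeps the message bytes already   */
/* in the final word and b2[i & 7] inserts the single 0x80 padding byte     */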

static void sha_end(sha512_ctx ctx[1], unsigned char hval[],
                    const unsigned int hlen)
{
	sha2_32t i = (sha2_32t)(ctx->count[0] & SHA512_MASK);

	bsw_64(ctx->wbuf, (i + 7) >> 3);

	/* bytes in the buffer are now in an order in which references  */
	/* to 64-bit words will put bytes with lower addresses into the */
	/* top of 64 bit words on BOTH big and little endian machines   */

	/* we now need to mask valid bytes and add the padding which is */
	/* a single 1 bit and as many zero bits as necessary.           */
	ctx->wbuf[i >> 3] = (ctx->wbuf[i >> 3] & m2[i & 7]) | b2[i & 7];

	/* we need 17 or more empty byte positions, one for the padding */
	/* byte (above) and sixteen for the length count.  If there is  */
	/* not enough space pad and empty the buffer                    */
	if (i > SHA512_BLOCK_SIZE - 17) {
		if (i < 120)
			ctx->wbuf[15] = 0;
		sha512_compile(ctx);
		i = 0;
	} else
		i = (i >> 3) + 1;

	while (i < 14)
		ctx->wbuf[i++] = 0;

	/* the following 64-bit length fields are assembled in the      */
	/* wrong byte order on little endian machines but this is       */
	/* corrected later since they are only ever used as 64-bit      */
	/* word values.                                                 */

	ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 61);
	ctx->wbuf[15] = ctx->count[0] << 3;

	sha512_compile(ctx);

	/* extract the hash value as bytes in case the hash buffer is   */
	/* misaligned for 32-bit words                                  */
	for (i = 0; i < hlen; ++i)
		hval[i] = (unsigned char)(ctx->hash[i >> 3] >> 8 * (~i & 7));
}

#endif

#if defined(SHA_2) || defined(SHA_384)

/* SHA384 initialisation data   */

const sha2_64t i384[80] = { n_u64(cbbb9d5dc1059ed8), n_u64(629a292a367cd507),
                            n_u64(9159015a3070dd17), n_u64(152fecd8f70e5939),
                            n_u64(67332667ffc00b31), n_u64(8eb44a8768581511),
                            n_u64(db0c2e0d64f98fa7), n_u64(47b5481dbefa4fa4)
                          };
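
/* note that only the first 8 entries of i384[] (and of i512[] below) are   */
/* ever used; the [80] array bound looks like an oversight but is harmless  */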

void sha384_begin(sha384_ctx ctx[1])
{
	ctx->count[0] = ctx->count[1] = 0;
	memcpy(ctx->hash, i384, 8 * sizeof(sha2_64t));
}

void sha384_end(sha384_ctx ctx[1], unsigned char hval[])
{
	sha_end(ctx, hval, SHA384_DIGEST_SIZE);
}

void sha384(unsigned char hval[], const unsigned char data[],
            unsigned long len)
{
	sha384_ctx cx[1];

	sha384_begin(cx);
	sha384_hash(cx, data, len);
	sha384_end(cx, hval);
}

#endif

#if defined(SHA_2) || defined(SHA_512)

/* SHA512 initialisation data   */

const sha2_64t i512[80] = { n_u64(6a09e667f3bcc908), n_u64(bb67ae8584caa73b),
                            n_u64(3c6ef372fe94f82b), n_u64(a54ff53a5f1d36f1),
                            n_u64(510e527fade682d1), n_u64(9b05688c2b3e6c1f),
                            n_u64(1f83d9abfb41bd6b), n_u64(5be0cd19137e2179)
                          };

void sha512_begin(sha512_ctx ctx[1])
{
	ctx->count[0] = ctx->count[1] = 0;
	memcpy(ctx->hash, i512, 8 * sizeof(sha2_64t));
}

void sha512_end(sha512_ctx ctx[1], unsigned char hval[])
{
	sha_end(ctx, hval, SHA512_DIGEST_SIZE);
}

void sha512(unsigned char hval[], const unsigned char data[],
            unsigned long len)
{
	sha512_ctx cx[1];

	sha512_begin(cx);
	sha512_hash(cx, data, len);
	sha512_end(cx, hval);
}

#endif

#if defined(SHA_2)

#define CTX_256(x) ((x)->uu->ctx256)
#define CTX_384(x) ((x)->uu->ctx512)
#define CTX_512(x) ((x)->uu->ctx512)

/* SHA2 initialisation */
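/* sha2_begin() accepts the hash length either in bits (256, 384, 512) or   */
/* in bytes (32, 48, 64); the bit-length cases below convert to bytes and   */
/* then deliberately fall through into the corresponding byte-length case   */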

int sha2_begin(sha2_ctx ctx[1], unsigned long len)
{
	unsigned long l = len;
	switch (len) {
	case 256:
		l = len >> 3;
	case 32:
		CTX_256(ctx)->count[0] = CTX_256(ctx)->count[1] = 0;
		memcpy(CTX_256(ctx)->hash, i256, 32);
		break;
	case 384:
		l = len >> 3;
	case 48:
		CTX_384(ctx)->count[0] = CTX_384(ctx)->count[1] = 0;
		memcpy(CTX_384(ctx)->hash, i384, 64);
		break;
	case 512:
		l = len >> 3;
	case 64:
		CTX_512(ctx)->count[0] = CTX_512(ctx)->count[1] = 0;
		memcpy(CTX_512(ctx)->hash, i512, 64);
		break;
	default:
		return SHA2_BAD;
	}

	ctx->sha2_len = l;
	return SHA2_GOOD;
}

void sha2_hash(sha2_ctx ctx[1], const unsigned char data[], unsigned long len)
{
	switch (ctx->sha2_len) {
	case 32:
		sha256_hash(CTX_256(ctx), data, len);
		return;
	case 48:
		sha384_hash(CTX_384(ctx), data, len);
		return;
	case 64:
		sha512_hash(CTX_512(ctx), data, len);
		return;
	}
}

void sha2_end(sha2_ctx ctx[1], unsigned char hval[])
{
	switch (ctx->sha2_len) {
	case 32:
		sha256_end(CTX_256(ctx), hval);
		return;
	case 48:
		sha_end(CTX_384(ctx), hval, SHA384_DIGEST_SIZE);
		return;
	case 64:
		sha_end(CTX_512(ctx), hval, SHA512_DIGEST_SIZE);
		return;
	}
}

int sha2(unsigned char hval[], unsigned long size, const unsigned char data[],
         unsigned long len)
{
	sha2_ctx cx[1];

	if (sha2_begin(cx, size) == SHA2_GOOD) {
		sha2_hash(cx, data, len);
		sha2_end(cx, hval);
		return SHA2_GOOD;
	} else
		return SHA2_BAD;
}
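
/*
 * A minimal usage sketch for the dynamic-length sha2 wrapper above
 * (illustrative only, compiled out by default).  SHA2_USAGE_EXAMPLE is a
 * hypothetical guard macro, not part of the original sources.  The return
 * value check matters because sha2_begin() rejects unsupported lengths.
 */
#ifdef SHA2_USAGE_EXAMPLE
static int sha384_of_buffer(const unsigned char *data, unsigned long len,
                            unsigned char digest[SHA384_DIGEST_SIZE])
{
	sha2_ctx cx[1];

	if (sha2_begin(cx, 384) != SHA2_GOOD) /* 384 bits; 48 (bytes) also works */
		return SHA2_BAD;

	sha2_hash(cx, data, len);
	sha2_end(cx, digest);

	return SHA2_GOOD;
}
#endif /* SHA2_USAGE_EXAMPLE */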

#endif