1 /*
2 ---------------------------------------------------------------------------
3 Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
4 All rights reserved.
5
6 LICENSE TERMS
7
8 The free distribution and use of this software in both source and binary
9 form is allowed (with or without changes) provided that:
10
11 1. distributions of this source code include the above copyright
12 notice, this list of conditions and the following disclaimer;
13
14 2. distributions in binary form include the above copyright
15 notice, this list of conditions and the following disclaimer
16 in the documentation and/or other associated materials;
17
18 3. the copyright holder's name is not used to endorse products
19 built using this software without specific written permission.
20
21 ALTERNATIVELY, provided that this notice is retained in full, this product
22 may be distributed under the terms of the GNU General Public License (GPL),
23 in which case the provisions of the GPL apply INSTEAD OF those given above.
24
25 DISCLAIMER
26
27 This software is provided 'as is' with no explicit or implied warranties
28 in respect of its properties, including, but not limited to, correctness
29 and/or fitness for purpose.
30 ---------------------------------------------------------------------------
31 Issue Date: 30/11/2002
32
33 This is a byte oriented version of SHA2 that operates on arrays of bytes
34 stored in memory. This code implements sha256, sha384 and sha512 but the
35 latter two functions rely on efficient 64-bit integer operations that
36 may not be very efficient on 32-bit machines
37
38 The sha256 functions use a type 'sha256_ctx' to hold details of the
39 current hash state and uses the following three calls:
40
41 void sha256_begin(sha256_ctx ctx[1])
42 void sha256_hash(sha256_ctx ctx[1], const unsigned char data[],
43 unsigned long len)
44 void sha256_end(sha256_ctx ctx[1], unsigned char hval[])
45
46 The first subroutine initialises a hash computation by setting up the
47 context in the sha256_ctx context. The second subroutine hashes 8-bit
48 bytes from array data[] into the hash state within the sha256_ctx context,
49 the number of bytes to be hashed being given by the unsigned long
50 integer len. The third subroutine completes the hash calculation and
51 places the resulting digest value in the array of 8-bit bytes hval[].
52
53 The sha384 and sha512 functions are similar and use the interfaces:
54
55 void sha384_begin(sha384_ctx ctx[1]);
56 void sha384_hash(sha384_ctx ctx[1], const unsigned char data[],
57 unsigned long len);
58 void sha384_end(sha384_ctx ctx[1], unsigned char hval[]);
59
60 void sha512_begin(sha512_ctx ctx[1]);
61 void sha512_hash(sha512_ctx ctx[1], const unsigned char data[],
62 unsigned long len);
63 void sha512_end(sha512_ctx ctx[1], unsigned char hval[]);
64
65 In addition there is a function sha2 that can be used to call all these
66 functions using a call with a hash length parameter as follows:
67
68 int sha2_begin(sha2_ctx ctx[1], unsigned long len);
69 void sha2_hash(sha2_ctx ctx[1], const unsigned char data[],
70 unsigned long len);
71 void sha2_end(sha2_ctx ctx[1], unsigned char hval[]);
72
73 My thanks to Erik Andersen <andersen@codepoet.org> for testing this code
74 on big-endian systems and for his assistance with corrections
75 */
76
77 /* define the hash functions that you need */
78
79 #define SHA_2 /* for dynamic hash length */
80 #define SHA_256
81 #define SHA_384
82 #define SHA_512
83
84 #ifdef USE_HOSTCC
85 #include <string.h> /* for memcpy() etc. */
86 #include <stdlib.h> /* for _lrotr with VC++ */
87 #endif
88
89 #include "sha2.h"
90
91 /* rockchip crypto byte order */
92 #define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
93
94 /* 1. PLATFORM SPECIFIC INCLUDES */
95
96 // #if defined(__GNU_LIBRARY__)
97 // # include <byteswap.h>
98 // # include <endian.h>
99 // #elif defined(__CRYPTLIB__)
100 // # if defined( INC_ALL )
101 // # include "crypt.h"
102 // # elif defined( INC_CHILD )
103 // # include "../crypt.h"
104 // # else
105 // # include "crypt.h"
106 // # endif
107 // # if defined(DATA_LITTLEENDIAN)
108 // # define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
109 // # else
110 // # define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
111 // # endif
112 // #if defined(_MSC_VER)
113 // # include <stdlib.h>
114 // #elif !defined(WIN32)
115 // # include <stdlib.h>
116 // # if !defined (_ENDIAN_H)
117 // # include <sys/param.h>
118 // # else
119 // # include _ENDIAN_H
120 // # endif
121 // #endif
122
/* 2. BYTE ORDER IN 32-BIT WORDS

   To obtain the highest speed on processors with 32-bit words, this code
   needs to determine the order in which bytes are packed into such words.
   The following block of code is an attempt to capture the most obvious
   ways in which various environments specify their endian definitions.
   It may well fail, in which case the definitions will need to be set by
   editing at the points marked **** EDIT HERE IF NECESSARY **** below.

   NOTE(review): PLATFORM_BYTE_ORDER is forced earlier in this file, so
   the detection cascade below is normally skipped entirely.
*/
#define SHA_LITTLE_ENDIAN 1234 /* byte 0 is least significant (i386) */
#define SHA_BIG_ENDIAN 4321 /* byte 0 is most significant (mc68k) */

#if !defined(PLATFORM_BYTE_ORDER)
#if defined(LITTLE_ENDIAN) || defined(BIG_ENDIAN)
#if defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
#if defined(BYTE_ORDER)
#if (BYTE_ORDER == LITTLE_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif(BYTE_ORDER == BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#endif
#elif defined(LITTLE_ENDIAN) && !defined(BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif !defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#elif defined(_LITTLE_ENDIAN) || defined(_BIG_ENDIAN)
#if defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
#if defined(_BYTE_ORDER)
#if (_BYTE_ORDER == _LITTLE_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif(_BYTE_ORDER == _BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#endif
#elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif !defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#elif 0 /* **** EDIT HERE IF NECESSARY **** */
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif 0 /* **** EDIT HERE IF NECESSARY **** */
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#elif(('1234' >> 24) == '1')
#define PLATFORM_BYTE_ORDER SHA_LITTLE_ENDIAN
#elif(('4321' >> 24) == '1')
#define PLATFORM_BYTE_ORDER SHA_BIG_ENDIAN
#endif
#endif

#if !defined(PLATFORM_BYTE_ORDER)
#error Please set undetermined byte order (see the **** EDIT HERE **** markers above).
#endif

#ifdef _MSC_VER
#pragma intrinsic(memcpy)
#endif
182
/* rotate a 32-bit word right by n bits (valid for n in 1..31); the   */
/* count is fully parenthesised so composite expressions expand safely */
#define rotr32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

#if !defined(bswap_32)
/* reverse the byte order of a 32-bit word */
#define bswap_32(x) \
	((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00))
#endif

/* SWAP_BYTES is defined only when the host packs words little-endian, */
/* in which case the input stream must be byte-swapped before hashing  */
#if (PLATFORM_BYTE_ORDER == SHA_LITTLE_ENDIAN)
#define SWAP_BYTES
#else
#undef SWAP_BYTES
#endif
195
196 #if defined(SHA_2) || defined(SHA_256)
197
198 #define SHA256_MASK (SHA256_BLOCK_SIZE - 1)
199
200 #if defined(SWAP_BYTES)
201 #define bsw_32(p, n) \
202 { \
203 int _i = (n); \
204 while (_i--) \
205 p[_i] = bswap_32(p[_i]); \
206 }
207 #else
208 #define bsw_32(p, n)
209 #endif
210
211 /* SHA256 mixing function definitions */
212
213 #define ch(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
214 #define maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
215
216 #define s256_0(x) (rotr32((x), 2) ^ rotr32((x), 13) ^ rotr32((x), 22))
217 #define s256_1(x) (rotr32((x), 6) ^ rotr32((x), 11) ^ rotr32((x), 25))
218 #define g256_0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3))
219 #define g256_1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10))
220
221 /* rotated SHA256 round definition. Rather than swapping variables as in */
222 /* FIPS-180, different variables are 'rotated' on each round, returning */
223 /* to their starting positions every eight rounds */
224
225 #define h2(i) \
226 ctx->wbuf[i & 15] += \
227 g256_1(ctx->wbuf[(i + 14) & 15]) + ctx->wbuf[(i + 9) & 15] + \
228 g256_0(ctx->wbuf[(i + 1) & 15])
229
230 #define h2_cycle(i, j) \
231 v[(7 - i) & 7] += \
232 (j ? h2(i) : ctx->wbuf[i & 15]) + k256[i + j] + s256_1(v[(4 - i) & 7]) + \
233 ch(v[(4 - i) & 7], v[(5 - i) & 7], v[(6 - i) & 7]); \
234 v[(3 - i) & 7] += v[(7 - i) & 7]; \
235 v[(7 - i) & 7] += s256_0(v[(0 - i) & 7]) + \
236 maj(v[(0 - i) & 7], v[(1 - i) & 7], v[(2 - i) & 7])
237
238 /* SHA256 mixing data */
239
240 const sha2_32t k256[64] = {
241 n_u32(428a2f98), n_u32(71374491), n_u32(b5c0fbcf), n_u32(e9b5dba5),
242 n_u32(3956c25b), n_u32(59f111f1), n_u32(923f82a4), n_u32(ab1c5ed5),
243 n_u32(d807aa98), n_u32(12835b01), n_u32(243185be), n_u32(550c7dc3),
244 n_u32(72be5d74), n_u32(80deb1fe), n_u32(9bdc06a7), n_u32(c19bf174),
245 n_u32(e49b69c1), n_u32(efbe4786), n_u32(0fc19dc6), n_u32(240ca1cc),
246 n_u32(2de92c6f), n_u32(4a7484aa), n_u32(5cb0a9dc), n_u32(76f988da),
247 n_u32(983e5152), n_u32(a831c66d), n_u32(b00327c8), n_u32(bf597fc7),
248 n_u32(c6e00bf3), n_u32(d5a79147), n_u32(06ca6351), n_u32(14292967),
249 n_u32(27b70a85), n_u32(2e1b2138), n_u32(4d2c6dfc), n_u32(53380d13),
250 n_u32(650a7354), n_u32(766a0abb), n_u32(81c2c92e), n_u32(92722c85),
251 n_u32(a2bfe8a1), n_u32(a81a664b), n_u32(c24b8b70), n_u32(c76c51a3),
252 n_u32(d192e819), n_u32(d6990624), n_u32(f40e3585), n_u32(106aa070),
253 n_u32(19a4c116), n_u32(1e376c08), n_u32(2748774c), n_u32(34b0bcb5),
254 n_u32(391c0cb3), n_u32(4ed8aa4a), n_u32(5b9cca4f), n_u32(682e6ff3),
255 n_u32(748f82ee), n_u32(78a5636f), n_u32(84c87814), n_u32(8cc70208),
256 n_u32(90befffa), n_u32(a4506ceb), n_u32(bef9a3f7), n_u32(c67178f2),
257 };
258
259 /* SHA256 initialisation data */
260
261 const sha2_32t i256[8] = { n_u32(6a09e667), n_u32(bb67ae85), n_u32(3c6ef372),
262 n_u32(a54ff53a), n_u32(510e527f), n_u32(9b05688c),
263 n_u32(1f83d9ab), n_u32(5be0cd19)
264 };
265
/* initialise a SHA256 hash computation: zero the 64-bit byte count */
/* and load the standard initial hash values                        */
void sha256_begin(sha256_ctx ctx[1])
{
	ctx->count[0] = ctx->count[1] = 0;
	memcpy(ctx->hash, i256, 8 * sizeof(sha2_32t));
}
271
272 /* Compile 64 bytes of hash data into SHA256 digest value */
273 /* NOTE: this routine assumes that the byte order in the */
274 /* ctx->wbuf[] at this point is in such an order that low */
275 /* address bytes in the ORIGINAL byte stream placed in this */
276 /* buffer will now go to the high end of words on BOTH big */
277 /* and little endian systems */
278
sha256_compile(sha256_ctx ctx[1])279 void sha256_compile(sha256_ctx ctx[1])
280 {
281 sha2_32t v[8], j;
282
283 memcpy(v, ctx->hash, 8 * sizeof(sha2_32t));
284
285 for (j = 0; j < 64; j += 16) {
286 h2_cycle(0, j);
287 h2_cycle(1, j);
288 h2_cycle(2, j);
289 h2_cycle(3, j);
290 h2_cycle(4, j);
291 h2_cycle(5, j);
292 h2_cycle(6, j);
293 h2_cycle(7, j);
294 h2_cycle(8, j);
295 h2_cycle(9, j);
296 h2_cycle(10, j);
297 h2_cycle(11, j);
298 h2_cycle(12, j);
299 h2_cycle(13, j);
300 h2_cycle(14, j);
301 h2_cycle(15, j);
302 }
303
304 ctx->hash[0] += v[0];
305 ctx->hash[1] += v[1];
306 ctx->hash[2] += v[2];
307 ctx->hash[3] += v[3];
308 ctx->hash[4] += v[4];
309 ctx->hash[5] += v[5];
310 ctx->hash[6] += v[6];
311 ctx->hash[7] += v[7];
312 }
313
314 /* SHA256 hash data in an array of bytes into hash buffer */
315 /* and call the hash_compile function as required. */
316
sha256_hash(sha256_ctx ctx[1],const unsigned char data[],unsigned long len)317 void sha256_hash(sha256_ctx ctx[1], const unsigned char data[],
318 unsigned long len)
319 {
320 sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA256_MASK),
321 space = SHA256_BLOCK_SIZE - pos;
322 const unsigned char *sp = data;
323
324 if ((ctx->count[0] += len) < len)
325 ++(ctx->count[1]);
326
327 while (len >= space) { /* tranfer whole blocks while possible */
328 memcpy(((unsigned char *)ctx->wbuf) + pos, sp, space);
329 sp += space;
330 len -= space;
331 space = SHA256_BLOCK_SIZE;
332 pos = 0;
333 bsw_32(ctx->wbuf, SHA256_BLOCK_SIZE >> 2);
334 sha256_compile(ctx);
335 }
336
337 memcpy(((unsigned char *)ctx->wbuf) + pos, sp, len);
338 }
339
340 /* SHA256 Final padding and digest calculation */
341
342 static sha2_32t m1[4] = { n_u32(00000000), n_u32(ff000000), n_u32(ffff0000),
343 n_u32(ffffff00)
344 };
345
346 static sha2_32t b1[4] = { n_u32(80000000), n_u32(00800000), n_u32(00008000),
347 n_u32(00000080)
348 };
349
/* finish a SHA256 computation: apply FIPS-180 padding, append the  */
/* message bit length and write the 32-byte digest to hval[]        */
void sha256_end(sha256_ctx ctx[1], unsigned char hval[])
{
	sha2_32t i = (sha2_32t)(ctx->count[0] & SHA256_MASK);

	/* no semicolon: bsw_32 expands to a bare block (or nothing) */
	bsw_32(ctx->wbuf, (i + 3) >> 2)
	/* bytes in the buffer are now in an order in which references  */
	/* to 32-bit words will put bytes with lower addresses into the */
	/* top of 32 bit words on BOTH big and little endian machines   */

	/* we now need to mask valid bytes and add the padding which is */
	/* a single 1 bit and as many zero bits as necessary.           */
	ctx->wbuf[i >> 2] = (ctx->wbuf[i >> 2] & m1[i & 3]) | b1[i & 3];

	/* we need 9 or more empty positions, one for the padding byte  */
	/* (above) and eight for the length count. If there is not      */
	/* enough space pad and empty the buffer                        */
	if (i > SHA256_BLOCK_SIZE - 9) {
		if (i < 60)
			ctx->wbuf[15] = 0;
		sha256_compile(ctx);
		i = 0;
	} else /* compute a word index for the empty buffer positions */
		i = (i >> 2) + 1;

	while (i < 14) /* and zero pad all but last two positions */
		ctx->wbuf[i++] = 0;

	/* the following 32-bit length fields are assembled in the   */
	/* wrong byte order on little endian machines but this is    */
	/* corrected later since they are only ever used as 32-bit   */
	/* word values.                                              */
	ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 29);
	ctx->wbuf[15] = ctx->count[0] << 3;

	sha256_compile(ctx);

	/* extract the hash value as bytes in case the hash buffer is */
	/* misaligned for 32-bit words                                */
	for (i = 0; i < SHA256_DIGEST_SIZE; ++i)
		hval[i] = (unsigned char)(ctx->hash[i >> 2] >> 8 * (~i & 3));
}
392
sha256(unsigned char hval[],const unsigned char data[],unsigned long len)393 void sha256(unsigned char hval[], const unsigned char data[],
394 unsigned long len)
395 {
396 sha256_ctx cx[1];
397
398 sha256_begin(cx);
399 sha256_hash(cx, data, len);
400 sha256_end(cx, hval);
401 }
402
403 #endif
404
405 #if defined(SHA_2) || defined(SHA_384) || defined(SHA_512)
406
/* mask to extract the byte offset within one SHA512 block */
#define SHA512_MASK (SHA512_BLOCK_SIZE - 1)

/* rotate a 64-bit word right by n bits (valid for n in 1..63); the   */
/* count is fully parenthesised so composite expressions expand safely */
#define rotr64(x, n) (((x) >> (n)) | ((x) << (64 - (n))))

#if !defined(bswap_64)
/* reverse the byte order of a 64-bit word using two 32-bit swaps */
#define bswap_64(x)                                       \
	(((sha2_64t)(bswap_32((sha2_32t)(x)))) << 32 |    \
	 bswap_32((sha2_32t)((x) >> 32)))
#endif

#if defined(SWAP_BYTES)
/* byte-swap n 64-bit words in place at p (little-endian hosts only) */
#define bsw_64(p, n)                             \
	{                                        \
		int _i = (n);                    \
		while (_i--)                     \
			p[_i] = bswap_64(p[_i]); \
	}
#else
#define bsw_64(p, n)
#endif
427
/* SHA512 mixing function definitions: big-sigma (round) and */
/* small-sigma (message schedule) functions, per FIPS 180-2  */

#define s512_0(x) (rotr64((x), 28) ^ rotr64((x), 34) ^ rotr64((x), 39))
#define s512_1(x) (rotr64((x), 14) ^ rotr64((x), 18) ^ rotr64((x), 41))
#define g512_0(x) (rotr64((x), 1) ^ rotr64((x), 8) ^ ((x) >> 7))
#define g512_1(x) (rotr64((x), 19) ^ rotr64((x), 61) ^ ((x) >> 6))

/* rotated SHA512 round definition. Rather than swapping variables as in */
/* FIPS-180, different variables are 'rotated' on each round, returning  */
/* to their starting positions every eight rounds. NOTE: i and j must be */
/* plain integer literals when these macros are invoked                  */

/* extend the 16-word message schedule in place */
#define h5(i) \
	ctx->wbuf[i & 15] += \
		g512_1(ctx->wbuf[(i + 14) & 15]) + ctx->wbuf[(i + 9) & 15] + \
		g512_0(ctx->wbuf[(i + 1) & 15])

/* one SHA512 round on the rotated working variables v[0..7] */
#define h5_cycle(i, j) \
	v[(7 - i) & 7] += \
		(j ? h5(i) : ctx->wbuf[i & 15]) + k512[i + j] + s512_1(v[(4 - i) & 7]) + \
		ch(v[(4 - i) & 7], v[(5 - i) & 7], v[(6 - i) & 7]); \
	v[(3 - i) & 7] += v[(7 - i) & 7]; \
	v[(7 - i) & 7] += s512_0(v[(0 - i) & 7]) + \
			  maj(v[(0 - i) & 7], v[(1 - i) & 7], v[(2 - i) & 7])
451
452 /* SHA384/SHA512 mixing data */
453
454 const sha2_64t k512[80] = {
455 n_u64(428a2f98d728ae22), n_u64(7137449123ef65cd), n_u64(b5c0fbcfec4d3b2f),
456 n_u64(e9b5dba58189dbbc), n_u64(3956c25bf348b538), n_u64(59f111f1b605d019),
457 n_u64(923f82a4af194f9b), n_u64(ab1c5ed5da6d8118), n_u64(d807aa98a3030242),
458 n_u64(12835b0145706fbe), n_u64(243185be4ee4b28c), n_u64(550c7dc3d5ffb4e2),
459 n_u64(72be5d74f27b896f), n_u64(80deb1fe3b1696b1), n_u64(9bdc06a725c71235),
460 n_u64(c19bf174cf692694), n_u64(e49b69c19ef14ad2), n_u64(efbe4786384f25e3),
461 n_u64(0fc19dc68b8cd5b5), n_u64(240ca1cc77ac9c65), n_u64(2de92c6f592b0275),
462 n_u64(4a7484aa6ea6e483), n_u64(5cb0a9dcbd41fbd4), n_u64(76f988da831153b5),
463 n_u64(983e5152ee66dfab), n_u64(a831c66d2db43210), n_u64(b00327c898fb213f),
464 n_u64(bf597fc7beef0ee4), n_u64(c6e00bf33da88fc2), n_u64(d5a79147930aa725),
465 n_u64(06ca6351e003826f), n_u64(142929670a0e6e70), n_u64(27b70a8546d22ffc),
466 n_u64(2e1b21385c26c926), n_u64(4d2c6dfc5ac42aed), n_u64(53380d139d95b3df),
467 n_u64(650a73548baf63de), n_u64(766a0abb3c77b2a8), n_u64(81c2c92e47edaee6),
468 n_u64(92722c851482353b), n_u64(a2bfe8a14cf10364), n_u64(a81a664bbc423001),
469 n_u64(c24b8b70d0f89791), n_u64(c76c51a30654be30), n_u64(d192e819d6ef5218),
470 n_u64(d69906245565a910), n_u64(f40e35855771202a), n_u64(106aa07032bbd1b8),
471 n_u64(19a4c116b8d2d0c8), n_u64(1e376c085141ab53), n_u64(2748774cdf8eeb99),
472 n_u64(34b0bcb5e19b48a8), n_u64(391c0cb3c5c95a63), n_u64(4ed8aa4ae3418acb),
473 n_u64(5b9cca4f7763e373), n_u64(682e6ff3d6b2b8a3), n_u64(748f82ee5defb2fc),
474 n_u64(78a5636f43172f60), n_u64(84c87814a1f0ab72), n_u64(8cc702081a6439ec),
475 n_u64(90befffa23631e28), n_u64(a4506cebde82bde9), n_u64(bef9a3f7b2c67915),
476 n_u64(c67178f2e372532b), n_u64(ca273eceea26619c), n_u64(d186b8c721c0c207),
477 n_u64(eada7dd6cde0eb1e), n_u64(f57d4f7fee6ed178), n_u64(06f067aa72176fba),
478 n_u64(0a637dc5a2c898a6), n_u64(113f9804bef90dae), n_u64(1b710b35131c471b),
479 n_u64(28db77f523047d84), n_u64(32caab7b40c72493), n_u64(3c9ebe0a15c9bebc),
480 n_u64(431d67c49c100d4c), n_u64(4cc5d4becb3e42b6), n_u64(597f299cfc657e2a),
481 n_u64(5fcb6fab3ad6faec), n_u64(6c44198c4a475817)
482 };
483
484 /* Compile 64 bytes of hash data into SHA384/SHA512 digest value */
485
sha512_compile(sha512_ctx ctx[1])486 void sha512_compile(sha512_ctx ctx[1])
487 {
488 sha2_64t v[8];
489 sha2_32t j;
490
491 memcpy(v, ctx->hash, 8 * sizeof(sha2_64t));
492
493 for (j = 0; j < 80; j += 16) {
494 h5_cycle(0, j);
495 h5_cycle(1, j);
496 h5_cycle(2, j);
497 h5_cycle(3, j);
498 h5_cycle(4, j);
499 h5_cycle(5, j);
500 h5_cycle(6, j);
501 h5_cycle(7, j);
502 h5_cycle(8, j);
503 h5_cycle(9, j);
504 h5_cycle(10, j);
505 h5_cycle(11, j);
506 h5_cycle(12, j);
507 h5_cycle(13, j);
508 h5_cycle(14, j);
509 h5_cycle(15, j);
510 }
511
512 ctx->hash[0] += v[0];
513 ctx->hash[1] += v[1];
514 ctx->hash[2] += v[2];
515 ctx->hash[3] += v[3];
516 ctx->hash[4] += v[4];
517 ctx->hash[5] += v[5];
518 ctx->hash[6] += v[6];
519 ctx->hash[7] += v[7];
520 }
521
522 /* Compile 128 bytes of hash data into SHA256 digest value */
523 /* NOTE: this routine assumes that the byte order in the */
524 /* ctx->wbuf[] at this point is in such an order that low */
525 /* address bytes in the ORIGINAL byte stream placed in this */
526 /* buffer will now go to the high end of words on BOTH big */
527 /* and little endian systems */
528
sha512_hash(sha512_ctx ctx[1],const unsigned char data[],unsigned long len)529 void sha512_hash(sha512_ctx ctx[1], const unsigned char data[],
530 unsigned long len)
531 {
532 sha2_32t pos = (sha2_32t)(ctx->count[0] & SHA512_MASK),
533 space = SHA512_BLOCK_SIZE - pos;
534 const unsigned char *sp = data;
535
536 if ((ctx->count[0] += len) < len)
537 ++(ctx->count[1]);
538
539 while (len >= space) { /* tranfer whole blocks while possible */
540 memcpy(((unsigned char *)ctx->wbuf) + pos, sp, space);
541 sp += space;
542 len -= space;
543 space = SHA512_BLOCK_SIZE;
544 pos = 0;
545 bsw_64(ctx->wbuf, SHA512_BLOCK_SIZE >> 3);
546 sha512_compile(ctx);
547 }
548
549 memcpy(((unsigned char *)ctx->wbuf) + pos, sp, len);
550 }
551
552 /* SHA384/512 Final padding and digest calculation */
553
554 static sha2_64t m2[8] = { n_u64(0000000000000000), n_u64(ff00000000000000),
555 n_u64(ffff000000000000), n_u64(ffffff0000000000),
556 n_u64(ffffffff00000000), n_u64(ffffffffff000000),
557 n_u64(ffffffffffff0000), n_u64(ffffffffffffff00)
558 };
559
560 static sha2_64t b2[8] = { n_u64(8000000000000000), n_u64(0080000000000000),
561 n_u64(0000800000000000), n_u64(0000008000000000),
562 n_u64(0000000080000000), n_u64(0000000000800000),
563 n_u64(0000000000008000), n_u64(0000000000000080)
564 };
565
sha_end(sha512_ctx ctx[1],unsigned char hval[],const unsigned int hlen)566 static void sha_end(sha512_ctx ctx[1], unsigned char hval[],
567 const unsigned int hlen)
568 {
569 sha2_32t i = (sha2_32t)(ctx->count[0] & SHA512_MASK);
570
571 bsw_64(ctx->wbuf, (i + 7) >> 3);
572
573 /* bytes in the buffer are now in an order in which references */
574 /* to 64-bit words will put bytes with lower addresses into the */
575 /* top of 64 bit words on BOTH big and little endian machines */
576
577 /* we now need to mask valid bytes and add the padding which is */
578 /* a single 1 bit and as many zero bits as necessary. */
579 ctx->wbuf[i >> 3] = (ctx->wbuf[i >> 3] & m2[i & 7]) | b2[i & 7];
580
581 /* we need 17 or more empty byte positions, one for the padding */
582 /* byte (above) and sixteen for the length count. If there is */
583 /* not enough space pad and empty the buffer */
584 if (i > SHA512_BLOCK_SIZE - 17) {
585 if (i < 120)
586 ctx->wbuf[15] = 0;
587 sha512_compile(ctx);
588 i = 0;
589 } else
590 i = (i >> 3) + 1;
591
592 while (i < 14)
593 ctx->wbuf[i++] = 0;
594
595 /* the following 64-bit length fields are assembled in the */
596 /* wrong byte order on little endian machines but this is */
597 /* corrected later since they are only ever used as 64-bit */
598 /* word values. */
599
600 ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 61);
601 ctx->wbuf[15] = ctx->count[0] << 3;
602
603 sha512_compile(ctx);
604
605 /* extract the hash value as bytes in case the hash buffer is */
606 /* misaligned for 32-bit words */
607 for (i = 0; i < hlen; ++i)
608 hval[i] = (unsigned char)(ctx->hash[i >> 3] >> 8 * (~i & 7));
609 }
610
611 #endif
612
613 #if defined(SHA_2) || defined(SHA_384)
614
615 /* SHA384 initialisation data */
616
617 const sha2_64t i384[80] = { n_u64(cbbb9d5dc1059ed8), n_u64(629a292a367cd507),
618 n_u64(9159015a3070dd17), n_u64(152fecd8f70e5939),
619 n_u64(67332667ffc00b31), n_u64(8eb44a8768581511),
620 n_u64(db0c2e0d64f98fa7), n_u64(47b5481dbefa4fa4)
621 };
622
/* initialise a SHA384 hash computation: zero the 128-bit byte count */
/* and load the standard initial hash values                         */
void sha384_begin(sha384_ctx ctx[1])
{
	ctx->count[0] = ctx->count[1] = 0;
	memcpy(ctx->hash, i384, 8 * sizeof(sha2_64t));
}
628
/* finish a SHA384 computation: the shared finaliser truncates the */
/* SHA512-style state to SHA384_DIGEST_SIZE bytes                  */
void sha384_end(sha384_ctx ctx[1], unsigned char hval[])
{
	sha_end(ctx, hval, SHA384_DIGEST_SIZE);
}
633
sha384(unsigned char hval[],const unsigned char data[],unsigned long len)634 void sha384(unsigned char hval[], const unsigned char data[],
635 unsigned long len)
636 {
637 sha384_ctx cx[1];
638
639 sha384_begin(cx);
640 sha384_hash(cx, data, len);
641 sha384_end(cx, hval);
642 }
643
644 #endif
645
646 #if defined(SHA_2) || defined(SHA_512)
647
648 /* SHA512 initialisation data */
649
650 const sha2_64t i512[80] = { n_u64(6a09e667f3bcc908), n_u64(bb67ae8584caa73b),
651 n_u64(3c6ef372fe94f82b), n_u64(a54ff53a5f1d36f1),
652 n_u64(510e527fade682d1), n_u64(9b05688c2b3e6c1f),
653 n_u64(1f83d9abfb41bd6b), n_u64(5be0cd19137e2179)
654 };
655
/* initialise a SHA512 hash computation: zero the 128-bit byte count */
/* and load the standard initial hash values                         */
void sha512_begin(sha512_ctx ctx[1])
{
	ctx->count[0] = ctx->count[1] = 0;
	memcpy(ctx->hash, i512, 8 * sizeof(sha2_64t));
}
661
/* finish a SHA512 computation and write the full 64-byte digest */
void sha512_end(sha512_ctx ctx[1], unsigned char hval[])
{
	sha_end(ctx, hval, SHA512_DIGEST_SIZE);
}
666
sha512(unsigned char hval[],const unsigned char data[],unsigned long len)667 void sha512(unsigned char hval[], const unsigned char data[],
668 unsigned long len)
669 {
670 sha512_ctx cx[1];
671
672 sha512_begin(cx);
673 sha512_hash(cx, data, len);
674 sha512_end(cx, hval);
675 }
676
677 #endif
678
679 #if defined(SHA_2)
680
/* accessors for the per-algorithm context inside the sha2_ctx union; */
/* CTX_384 intentionally aliases ctx512 — SHA384 shares the SHA512    */
/* context layout                                                     */
#define CTX_256(x) ((x)->uu->ctx256)
#define CTX_384(x) ((x)->uu->ctx512)
#define CTX_512(x) ((x)->uu->ctx512)
684
685 /* SHA2 initialisation */
686
sha2_begin(sha2_ctx ctx[1],unsigned long len)687 int sha2_begin(sha2_ctx ctx[1], unsigned long len)
688 {
689 unsigned long l = len;
690 switch (len) {
691 case 256:
692 l = len >> 3;
693 case 32:
694 CTX_256(ctx)->count[0] = CTX_256(ctx)->count[1] = 0;
695 memcpy(CTX_256(ctx)->hash, i256, 32);
696 break;
697 case 384:
698 l = len >> 3;
699 case 48:
700 CTX_384(ctx)->count[0] = CTX_384(ctx)->count[1] = 0;
701 memcpy(CTX_384(ctx)->hash, i384, 64);
702 break;
703 case 512:
704 l = len >> 3;
705 case 64:
706 CTX_512(ctx)->count[0] = CTX_512(ctx)->count[1] = 0;
707 memcpy(CTX_512(ctx)->hash, i512, 64);
708 break;
709 default:
710 return SHA2_BAD;
711 }
712
713 ctx->sha2_len = l;
714 return SHA2_GOOD;
715 }
716
/* hash len bytes at data[] into whichever context sha2_begin()     */
/* selected; silently does nothing if sha2_len is unset/invalid     */
void sha2_hash(sha2_ctx ctx[1], const unsigned char data[], unsigned long len)
{
	switch (ctx->sha2_len) {
	case 32:
		sha256_hash(CTX_256(ctx), data, len);
		return;
	case 48:
		sha384_hash(CTX_384(ctx), data, len);
		return;
	case 64:
		sha512_hash(CTX_512(ctx), data, len);
		return;
	}
}
731
/* complete the selected hash and write sha2_len digest bytes to    */
/* hval[]; silently does nothing if sha2_len is unset/invalid       */
void sha2_end(sha2_ctx ctx[1], unsigned char hval[])
{
	switch (ctx->sha2_len) {
	case 32:
		sha256_end(CTX_256(ctx), hval);
		return;
	case 48:
		sha_end(CTX_384(ctx), hval, SHA384_DIGEST_SIZE);
		return;
	case 64:
		sha_end(CTX_512(ctx), hval, SHA512_DIGEST_SIZE);
		return;
	}
}
746
sha2(unsigned char hval[],unsigned long size,const unsigned char data[],unsigned long len)747 int sha2(unsigned char hval[], unsigned long size, const unsigned char data[],
748 unsigned long len)
749 {
750 sha2_ctx cx[1];
751
752 if (sha2_begin(cx, size) == SHA2_GOOD) {
753 sha2_hash(cx, data, len);
754 sha2_end(cx, hval);
755 return SHA2_GOOD;
756 } else
757 return SHA2_BAD;
758 }
759
760 #endif
761