xref: /rk3399_rockchip-uboot/lib/avb/libavb/avb_sha512.c (revision 894688431927c1b73c64860c8aa71463c2593ea2)
/* SHA-256 and SHA-512 implementation based on code by Olivier Gay
 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date:  04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <android_avb/avb_sha.h>
#include <android_avb/avb_util.h>

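/*
 * Illustrative usage sketch (not part of the original file; `data' and
 * `data_len' are placeholder names): both implementations below expose the
 * same three-call streaming API.
 *
 *   AvbSHA512Ctx ctx;
 *   uint8_t* digest;
 *
 *   avb_sha512_init(&ctx);
 *   avb_sha512_update(&ctx, data, data_len);  // may be called repeatedly
 *   digest = avb_sha512_final(&ctx);          // 64-byte digest in ctx->buf
 *
 * Note: the CRYPTO_V2 hardware path below also reads ctx->tot_len inside
 * avb_sha512_init().
 */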
/* Crypto-v1 does not support SHA-512 */
#ifdef CONFIG_ROCKCHIP_CRYPTO_V2
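/*
 * Hardware-backed path: hashing is offloaded to the Rockchip crypto engine
 * through the U-Boot crypto driver calls used below (crypto_get_device(),
 * crypto_sha_init(), crypto_sha_update(), crypto_sha_final()).
 */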
void avb_sha512_init(AvbSHA512Ctx* ctx) {
  ctx->crypto_ctx.algo = CRYPTO_SHA512;
  ctx->crypto_ctx.length = ctx->tot_len;
  memset(ctx->buf, 0, sizeof(ctx->buf));

  ctx->crypto_dev = crypto_get_device(ctx->crypto_ctx.algo);
  if (!ctx->crypto_dev)
    avb_error("Can't get sha512 crypto device\n");
  else
    crypto_sha_init(ctx->crypto_dev, &ctx->crypto_ctx);
}

void avb_sha512_update(AvbSHA512Ctx* ctx, const uint8_t* data, size_t len) {
  if (ctx->crypto_dev)
    crypto_sha_update(ctx->crypto_dev, (u32 *)data, len);
}

uint8_t* avb_sha512_final(AvbSHA512Ctx* ctx) {
  if (ctx->crypto_dev)
    crypto_sha_final(ctx->crypto_dev, &ctx->crypto_ctx, ctx->buf);

  return ctx->buf;
}

#else
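/*
 * Software fallback: a plain FIPS 180-2 SHA-512 implementation (derived from
 * the Olivier Gay code credited above), used when the crypto engine is not
 * available.
 */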
#define SHFR(x, n) (x >> n)
#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define CH(x, y, z) ((x & y) ^ (~x & z))
#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))

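/*
 * FIPS 180-2 SHA-512 round functions: SHA512_F1/F2 are the "big sigma"
 * functions used in the compression rounds, SHA512_F3/F4 the "small sigma"
 * functions used in the message schedule.
 */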
#define SHA512_F1(x) (ROTR(x, 28) ^ ROTR(x, 34) ^ ROTR(x, 39))
#define SHA512_F2(x) (ROTR(x, 14) ^ ROTR(x, 18) ^ ROTR(x, 41))
#define SHA512_F3(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHFR(x, 7))
#define SHA512_F4(x) (ROTR(x, 19) ^ ROTR(x, 61) ^ SHFR(x, 6))

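/*
 * Big-endian (de)serialization helpers: UNPACK32/UNPACK64 write a word to a
 * byte string most-significant byte first, PACK64 reads a 64-bit word back.
 */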
#define UNPACK32(x, str)                 \
  {                                      \
    *((str) + 3) = (uint8_t)((x));       \
    *((str) + 2) = (uint8_t)((x) >> 8);  \
    *((str) + 1) = (uint8_t)((x) >> 16); \
    *((str) + 0) = (uint8_t)((x) >> 24); \
  }

#define UNPACK64(x, str)                         \
  {                                              \
    *((str) + 7) = (uint8_t)x;                   \
    *((str) + 6) = (uint8_t)((uint64_t)x >> 8);  \
    *((str) + 5) = (uint8_t)((uint64_t)x >> 16); \
    *((str) + 4) = (uint8_t)((uint64_t)x >> 24); \
    *((str) + 3) = (uint8_t)((uint64_t)x >> 32); \
    *((str) + 2) = (uint8_t)((uint64_t)x >> 40); \
    *((str) + 1) = (uint8_t)((uint64_t)x >> 48); \
    *((str) + 0) = (uint8_t)((uint64_t)x >> 56); \
  }

#define PACK64(str, x)                                                        \
  {                                                                           \
    *(x) =                                                                    \
        ((uint64_t) * ((str) + 7)) | ((uint64_t) * ((str) + 6) << 8) |        \
        ((uint64_t) * ((str) + 5) << 16) | ((uint64_t) * ((str) + 4) << 24) | \
        ((uint64_t) * ((str) + 3) << 32) | ((uint64_t) * ((str) + 2) << 40) | \
        ((uint64_t) * ((str) + 1) << 48) | ((uint64_t) * ((str) + 0) << 56);  \
  }

/* Macros used for loop unrolling */

#define SHA512_SCR(i) \
  { w[i] = SHA512_F4(w[i - 2]) + w[i - 7] + SHA512_F3(w[i - 15]) + w[i - 16]; }

#define SHA512_EXP(a, b, c, d, e, f, g, h, j)                               \
  {                                                                         \
    t1 = wv[h] + SHA512_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) + sha512_k[j] + \
         w[j];                                                              \
    t2 = SHA512_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);                       \
    wv[d] += t1;                                                            \
    wv[h] = t1 + t2;                                                        \
  }

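/*
 * Initial hash value H(0) from FIPS 180-2: the first 64 bits of the
 * fractional parts of the square roots of the first eight primes.
 */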
static const uint64_t sha512_h0[8] = {0x6a09e667f3bcc908ULL,
                                      0xbb67ae8584caa73bULL,
                                      0x3c6ef372fe94f82bULL,
                                      0xa54ff53a5f1d36f1ULL,
                                      0x510e527fade682d1ULL,
                                      0x9b05688c2b3e6c1fULL,
                                      0x1f83d9abfb41bd6bULL,
                                      0x5be0cd19137e2179ULL};

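/*
 * The 80 SHA-512 round constants K(t) from FIPS 180-2: the first 64 bits of
 * the fractional parts of the cube roots of the first eighty primes.
 */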
static const uint64_t sha512_k[80] = {
    0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
    0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
    0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL,
    0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
    0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL,
    0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
    0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL,
    0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
    0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL,
    0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
    0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL,
    0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
    0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL,
    0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
    0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL,
    0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
    0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL,
    0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
    0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL,
    0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
    0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL,
    0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
    0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL,
    0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
    0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL,
    0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
    0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL};

/* SHA-512 implementation */

void avb_sha512_init(AvbSHA512Ctx* ctx) {
#ifdef UNROLL_LOOPS_SHA512
  ctx->h[0] = sha512_h0[0];
  ctx->h[1] = sha512_h0[1];
  ctx->h[2] = sha512_h0[2];
  ctx->h[3] = sha512_h0[3];
  ctx->h[4] = sha512_h0[4];
  ctx->h[5] = sha512_h0[5];
  ctx->h[6] = sha512_h0[6];
  ctx->h[7] = sha512_h0[7];
#else
  int i;

  for (i = 0; i < 8; i++)
    ctx->h[i] = sha512_h0[i];
#endif /* UNROLL_LOOPS_SHA512 */

  ctx->len = 0;
  ctx->tot_len = 0;
}

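/*
 * Compression function: absorbs block_nb consecutive 128-byte blocks of
 * "message" into the hash state ctx->h, one 80-round pass per block.
 */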
static void SHA512_transform(AvbSHA512Ctx* ctx,
                             const uint8_t* message,
                             size_t block_nb) {
  uint64_t w[80];
  uint64_t wv[8];
  uint64_t t1, t2;
  const uint8_t* sub_block;
  size_t i, j;

  for (i = 0; i < block_nb; i++) {
    sub_block = message + (i << 7);

#ifdef UNROLL_LOOPS_SHA512
    PACK64(&sub_block[0], &w[0]);
    PACK64(&sub_block[8], &w[1]);
    PACK64(&sub_block[16], &w[2]);
    PACK64(&sub_block[24], &w[3]);
    PACK64(&sub_block[32], &w[4]);
    PACK64(&sub_block[40], &w[5]);
    PACK64(&sub_block[48], &w[6]);
    PACK64(&sub_block[56], &w[7]);
    PACK64(&sub_block[64], &w[8]);
    PACK64(&sub_block[72], &w[9]);
    PACK64(&sub_block[80], &w[10]);
    PACK64(&sub_block[88], &w[11]);
    PACK64(&sub_block[96], &w[12]);
    PACK64(&sub_block[104], &w[13]);
    PACK64(&sub_block[112], &w[14]);
    PACK64(&sub_block[120], &w[15]);

    SHA512_SCR(16);
    SHA512_SCR(17);
    SHA512_SCR(18);
    SHA512_SCR(19);
    SHA512_SCR(20);
    SHA512_SCR(21);
    SHA512_SCR(22);
    SHA512_SCR(23);
    SHA512_SCR(24);
    SHA512_SCR(25);
    SHA512_SCR(26);
    SHA512_SCR(27);
    SHA512_SCR(28);
    SHA512_SCR(29);
    SHA512_SCR(30);
    SHA512_SCR(31);
    SHA512_SCR(32);
    SHA512_SCR(33);
    SHA512_SCR(34);
    SHA512_SCR(35);
    SHA512_SCR(36);
    SHA512_SCR(37);
    SHA512_SCR(38);
    SHA512_SCR(39);
    SHA512_SCR(40);
    SHA512_SCR(41);
    SHA512_SCR(42);
    SHA512_SCR(43);
    SHA512_SCR(44);
    SHA512_SCR(45);
    SHA512_SCR(46);
    SHA512_SCR(47);
    SHA512_SCR(48);
    SHA512_SCR(49);
    SHA512_SCR(50);
    SHA512_SCR(51);
    SHA512_SCR(52);
    SHA512_SCR(53);
    SHA512_SCR(54);
    SHA512_SCR(55);
    SHA512_SCR(56);
    SHA512_SCR(57);
    SHA512_SCR(58);
    SHA512_SCR(59);
    SHA512_SCR(60);
    SHA512_SCR(61);
    SHA512_SCR(62);
    SHA512_SCR(63);
    SHA512_SCR(64);
    SHA512_SCR(65);
    SHA512_SCR(66);
    SHA512_SCR(67);
    SHA512_SCR(68);
    SHA512_SCR(69);
    SHA512_SCR(70);
    SHA512_SCR(71);
    SHA512_SCR(72);
    SHA512_SCR(73);
    SHA512_SCR(74);
    SHA512_SCR(75);
    SHA512_SCR(76);
    SHA512_SCR(77);
    SHA512_SCR(78);
    SHA512_SCR(79);

    wv[0] = ctx->h[0];
    wv[1] = ctx->h[1];
    wv[2] = ctx->h[2];
    wv[3] = ctx->h[3];
    wv[4] = ctx->h[4];
    wv[5] = ctx->h[5];
    wv[6] = ctx->h[6];
    wv[7] = ctx->h[7];

    j = 0;

    do {
      SHA512_EXP(0, 1, 2, 3, 4, 5, 6, 7, j);
      j++;
      SHA512_EXP(7, 0, 1, 2, 3, 4, 5, 6, j);
      j++;
      SHA512_EXP(6, 7, 0, 1, 2, 3, 4, 5, j);
      j++;
      SHA512_EXP(5, 6, 7, 0, 1, 2, 3, 4, j);
      j++;
      SHA512_EXP(4, 5, 6, 7, 0, 1, 2, 3, j);
      j++;
      SHA512_EXP(3, 4, 5, 6, 7, 0, 1, 2, j);
      j++;
      SHA512_EXP(2, 3, 4, 5, 6, 7, 0, 1, j);
      j++;
      SHA512_EXP(1, 2, 3, 4, 5, 6, 7, 0, j);
      j++;
    } while (j < 80);

    ctx->h[0] += wv[0];
    ctx->h[1] += wv[1];
    ctx->h[2] += wv[2];
    ctx->h[3] += wv[3];
    ctx->h[4] += wv[4];
    ctx->h[5] += wv[5];
    ctx->h[6] += wv[6];
    ctx->h[7] += wv[7];
#else
    for (j = 0; j < 16; j++) {
      PACK64(&sub_block[j << 3], &w[j]);
    }

    for (j = 16; j < 80; j++) {
      SHA512_SCR(j);
    }

    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    for (j = 0; j < 80; j++) {
      t1 = wv[7] + SHA512_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha512_k[j] +
           w[j];
      t2 = SHA512_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    for (j = 0; j < 8; j++)
      ctx->h[j] += wv[j];
#endif /* UNROLL_LOOPS_SHA512 */
  }
}

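/*
 * Streaming update: data is appended to the 128-byte staging buffer
 * ctx->block; whenever full blocks are available they are run through
 * SHA512_transform() and any remainder is carried over for the next call.
 */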
void avb_sha512_update(AvbSHA512Ctx* ctx, const uint8_t* data, size_t len) {
  size_t block_nb;
  size_t new_len, rem_len, tmp_len;
  const uint8_t* shifted_data;

  tmp_len = AVB_SHA512_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  avb_memcpy(&ctx->block[ctx->len], data, rem_len);

  if (ctx->len + len < AVB_SHA512_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  new_len = len - rem_len;
  block_nb = new_len / AVB_SHA512_BLOCK_SIZE;

  shifted_data = data + rem_len;

  SHA512_transform(ctx, ctx->block, 1);
  SHA512_transform(ctx, shifted_data, block_nb);

  rem_len = new_len % AVB_SHA512_BLOCK_SIZE;

  avb_memcpy(ctx->block, &shifted_data[block_nb << 7], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 7;
}

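/*
 * Finalization: pads the buffered input with a 0x80 byte followed by zeros,
 * appends the total message length in bits as a 64-bit big-endian value in
 * the last 8 bytes (the upper half of the formal 128-bit length field stays
 * zero), compresses the final one or two blocks, and serializes ctx->h into
 * the 64-byte digest ctx->buf.
 */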
uint8_t* avb_sha512_final(AvbSHA512Ctx* ctx) {
  size_t block_nb;
  size_t pm_len;
  uint64_t len_b;

#ifndef UNROLL_LOOPS_SHA512
  size_t i;
#endif

  block_nb =
      1 + ((AVB_SHA512_BLOCK_SIZE - 17) < (ctx->len % AVB_SHA512_BLOCK_SIZE));

  len_b = (ctx->tot_len + ctx->len) << 3;
  pm_len = block_nb << 7;

  avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK64(len_b, ctx->block + pm_len - 8);

  SHA512_transform(ctx, ctx->block, block_nb);

#ifdef UNROLL_LOOPS_SHA512
  UNPACK64(ctx->h[0], &ctx->buf[0]);
  UNPACK64(ctx->h[1], &ctx->buf[8]);
  UNPACK64(ctx->h[2], &ctx->buf[16]);
  UNPACK64(ctx->h[3], &ctx->buf[24]);
  UNPACK64(ctx->h[4], &ctx->buf[32]);
  UNPACK64(ctx->h[5], &ctx->buf[40]);
  UNPACK64(ctx->h[6], &ctx->buf[48]);
  UNPACK64(ctx->h[7], &ctx->buf[56]);
#else
  for (i = 0; i < 8; i++)
    UNPACK64(ctx->h[i], &ctx->buf[i << 3]);
#endif /* UNROLL_LOOPS_SHA512 */

  return ctx->buf;
}
#endif