xref: /rk3399_rockchip-uboot/lib/avb/libavb/avb_sha256.c (revision 6cef3c7b7cc45a9c882fc64e5496a416c99313b8)
/* SHA-256 implementation based on code by Olivier Gay
 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date:  04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <android_avb/avb_sha.h>
#include <android_avb/avb_util.h>

#if defined(CONFIG_DM_CRYPTO) || defined(CONFIG_ARMV8_CRYPTO)
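/*
 * Hardware-accelerated path: the three avb_sha256_* calls wrap U-Boot's
 * sha256_starts/sha256_update/sha256_finish. The crypto context is seeded
 * with ctx->tot_len before sha256_starts(), presumably because some hardware
 * back ends need the total message length before hashing begins; callers on
 * this path are therefore expected to set ctx->tot_len in advance.
 */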
void avb_sha256_init(AvbSHA256Ctx* ctx) {
  ctx->sha256ctx.length = ctx->tot_len;
  sha256_starts(&ctx->sha256ctx);
  memset(ctx->buf, 0, sizeof(ctx->buf));
}

void avb_sha256_update(AvbSHA256Ctx* ctx, const uint8_t* data, size_t len) {
  sha256_update(&ctx->sha256ctx, data, len);
}

uint8_t* avb_sha256_final(AvbSHA256Ctx* ctx) {
  sha256_finish(&ctx->sha256ctx, ctx->buf);

  return ctx->buf;
}
#else
#define SHFR(x, n) (x >> n)
#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define CH(x, y, z) ((x & y) ^ (~x & z))
#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))

#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3))
#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10))
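
/*
 * In FIPS 180-2 notation, SHA256_F1 and SHA256_F2 are the round functions
 * Sigma0 and Sigma1, while SHA256_F3 and SHA256_F4 are the message-schedule
 * functions sigma0 and sigma1.
 */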

#define UNPACK32(x, str)                 \
  {                                      \
    *((str) + 3) = (uint8_t)((x));       \
    *((str) + 2) = (uint8_t)((x) >> 8);  \
    *((str) + 1) = (uint8_t)((x) >> 16); \
    *((str) + 0) = (uint8_t)((x) >> 24); \
  }

#define UNPACK64(x, str)                           \
  {                                                \
    *((str) + 7) = (uint8_t)(x);                   \
    *((str) + 6) = (uint8_t)((uint64_t)(x) >> 8);  \
    *((str) + 5) = (uint8_t)((uint64_t)(x) >> 16); \
    *((str) + 4) = (uint8_t)((uint64_t)(x) >> 24); \
    *((str) + 3) = (uint8_t)((uint64_t)(x) >> 32); \
    *((str) + 2) = (uint8_t)((uint64_t)(x) >> 40); \
    *((str) + 1) = (uint8_t)((uint64_t)(x) >> 48); \
    *((str) + 0) = (uint8_t)((uint64_t)(x) >> 56); \
  }
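
/* UNPACK64 is used once, in avb_sha256_final(), to append the big-endian
 * 64-bit message length (in bits) to the final padding block. */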

#define PACK32(str, x)                      \
  {                                         \
    *(x) = ((uint32_t)*((str) + 3)) |       \
           ((uint32_t)*((str) + 2) << 8) |  \
           ((uint32_t)*((str) + 1) << 16) | \
           ((uint32_t)*((str) + 0) << 24);  \
  }
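
/* Example: with str pointing at bytes {0x01, 0x02, 0x03, 0x04}, PACK32
 * stores 0x01020304 in *x, i.e. words are loaded in the big-endian order
 * FIPS 180-2 specifies. */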

/* Macros used for loop unrolling */

#define SHA256_SCR(i) \
  { w[i] = SHA256_F4(w[i - 2]) + w[i - 7] + SHA256_F3(w[i - 15]) + w[i - 16]; }

#define SHA256_EXP(a, b, c, d, e, f, g, h, j)                               \
  {                                                                         \
    t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) + sha256_k[j] + \
         w[j];                                                              \
    t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);                       \
    wv[d] += t1;                                                            \
    wv[h] = t1 + t2;                                                        \
  }
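
/*
 * SHA256_SCR computes one message-schedule word: w[i] = sigma1(w[i - 2]) +
 * w[i - 7] + sigma0(w[i - 15]) + w[i - 16]. SHA256_EXP performs one
 * compression round; rotating the a..h indices across calls lets the
 * unrolled variant skip the explicit wv[] shuffle done by the plain loop.
 */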

static const uint32_t sha256_h0[8] = {0x6a09e667,
                                      0xbb67ae85,
                                      0x3c6ef372,
                                      0xa54ff53a,
                                      0x510e527f,
                                      0x9b05688c,
                                      0x1f83d9ab,
                                      0x5be0cd19};
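
/* sha256_h0 is the initial hash value H(0): the first 32 bits of the
 * fractional parts of the square roots of the first eight primes
 * (FIPS 180-2). */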

static const uint32_t sha256_k[64] = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
    0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
    0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
    0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
    0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
    0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};
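
/* sha256_k holds the 64 round constants: the first 32 bits of the fractional
 * parts of the cube roots of the first 64 primes (FIPS 180-2). */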

/* SHA-256 implementation */
void avb_sha256_init(AvbSHA256Ctx* ctx) {
#ifndef UNROLL_LOOPS
  int i;
  for (i = 0; i < 8; i++) {
    ctx->h[i] = sha256_h0[i];
  }
#else
  ctx->h[0] = sha256_h0[0];
  ctx->h[1] = sha256_h0[1];
  ctx->h[2] = sha256_h0[2];
  ctx->h[3] = sha256_h0[3];
  ctx->h[4] = sha256_h0[4];
  ctx->h[5] = sha256_h0[5];
  ctx->h[6] = sha256_h0[6];
  ctx->h[7] = sha256_h0[7];
#endif /* !UNROLL_LOOPS */

  ctx->len = 0;
  ctx->tot_len = 0;
}

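/*
 * Core compression function: processes block_nb consecutive 64-byte blocks
 * starting at message, updating the running state in ctx->h.
 */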
static void SHA256_transform(AvbSHA256Ctx* ctx,
                             const uint8_t* message,
                             size_t block_nb) {
  uint32_t w[64];
  uint32_t wv[8];
  uint32_t t1, t2;
  const unsigned char* sub_block;
  size_t i;

#ifndef UNROLL_LOOPS
  size_t j;
#endif

  for (i = 0; i < block_nb; i++) {
    sub_block = message + (i << 6);

#ifndef UNROLL_LOOPS
    for (j = 0; j < 16; j++) {
      PACK32(&sub_block[j << 2], &w[j]);
    }

    for (j = 16; j < 64; j++) {
      SHA256_SCR(j);
    }

    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    for (j = 0; j < 64; j++) {
      t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha256_k[j] +
           w[j];
      t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    for (j = 0; j < 8; j++) {
      ctx->h[j] += wv[j];
    }
#else
    PACK32(&sub_block[0], &w[0]);
    PACK32(&sub_block[4], &w[1]);
    PACK32(&sub_block[8], &w[2]);
    PACK32(&sub_block[12], &w[3]);
    PACK32(&sub_block[16], &w[4]);
    PACK32(&sub_block[20], &w[5]);
    PACK32(&sub_block[24], &w[6]);
    PACK32(&sub_block[28], &w[7]);
    PACK32(&sub_block[32], &w[8]);
    PACK32(&sub_block[36], &w[9]);
    PACK32(&sub_block[40], &w[10]);
    PACK32(&sub_block[44], &w[11]);
    PACK32(&sub_block[48], &w[12]);
    PACK32(&sub_block[52], &w[13]);
    PACK32(&sub_block[56], &w[14]);
    PACK32(&sub_block[60], &w[15]);

    SHA256_SCR(16);
    SHA256_SCR(17);
    SHA256_SCR(18);
    SHA256_SCR(19);
    SHA256_SCR(20);
    SHA256_SCR(21);
    SHA256_SCR(22);
    SHA256_SCR(23);
    SHA256_SCR(24);
    SHA256_SCR(25);
    SHA256_SCR(26);
    SHA256_SCR(27);
    SHA256_SCR(28);
    SHA256_SCR(29);
    SHA256_SCR(30);
    SHA256_SCR(31);
    SHA256_SCR(32);
    SHA256_SCR(33);
    SHA256_SCR(34);
    SHA256_SCR(35);
    SHA256_SCR(36);
    SHA256_SCR(37);
    SHA256_SCR(38);
    SHA256_SCR(39);
    SHA256_SCR(40);
    SHA256_SCR(41);
    SHA256_SCR(42);
    SHA256_SCR(43);
    SHA256_SCR(44);
    SHA256_SCR(45);
    SHA256_SCR(46);
    SHA256_SCR(47);
    SHA256_SCR(48);
    SHA256_SCR(49);
    SHA256_SCR(50);
    SHA256_SCR(51);
    SHA256_SCR(52);
    SHA256_SCR(53);
    SHA256_SCR(54);
    SHA256_SCR(55);
    SHA256_SCR(56);
    SHA256_SCR(57);
    SHA256_SCR(58);
    SHA256_SCR(59);
    SHA256_SCR(60);
    SHA256_SCR(61);
    SHA256_SCR(62);
    SHA256_SCR(63);

    wv[0] = ctx->h[0];
    wv[1] = ctx->h[1];
    wv[2] = ctx->h[2];
    wv[3] = ctx->h[3];
    wv[4] = ctx->h[4];
    wv[5] = ctx->h[5];
    wv[6] = ctx->h[6];
    wv[7] = ctx->h[7];

    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 0);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 1);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 2);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 3);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 4);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 5);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 6);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 7);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 8);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 9);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 10);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 11);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 12);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 13);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 14);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 15);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 16);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 17);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 18);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 19);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 20);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 21);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 22);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 23);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 24);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 25);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 26);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 27);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 28);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 29);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 30);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 31);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 32);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 33);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 34);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 35);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 36);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 37);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 38);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 39);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 40);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 41);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 42);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 43);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 44);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 45);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 46);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 47);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 48);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 49);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 50);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 51);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 52);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 53);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 54);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 55);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 56);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 57);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 58);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 59);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 60);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 61);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 62);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 63);

    ctx->h[0] += wv[0];
    ctx->h[1] += wv[1];
    ctx->h[2] += wv[2];
    ctx->h[3] += wv[3];
    ctx->h[4] += wv[4];
    ctx->h[5] += wv[5];
    ctx->h[6] += wv[6];
    ctx->h[7] += wv[7];
#endif /* !UNROLL_LOOPS */
  }
}

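/*
 * Streaming update: input is buffered in ctx->block until a full 64-byte
 * block is available; the buffered block is compressed, whole blocks are
 * then compressed directly from the caller's buffer, and any tail bytes are
 * stashed for the next call. ctx->tot_len counts bytes already compressed;
 * ctx->len counts buffered tail bytes.
 */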
void avb_sha256_update(AvbSHA256Ctx* ctx, const uint8_t* data, size_t len) {
  size_t block_nb;
  size_t new_len, rem_len, tmp_len;
  const uint8_t* shifted_data;

  tmp_len = AVB_SHA256_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  avb_memcpy(&ctx->block[ctx->len], data, rem_len);

  if (ctx->len + len < AVB_SHA256_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  new_len = len - rem_len;
  block_nb = new_len / AVB_SHA256_BLOCK_SIZE;

  shifted_data = data + rem_len;

  SHA256_transform(ctx, ctx->block, 1);
  SHA256_transform(ctx, shifted_data, block_nb);

  rem_len = new_len % AVB_SHA256_BLOCK_SIZE;

  avb_memcpy(ctx->block, &shifted_data[block_nb << 6], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 6;
}

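/*
 * Finalization per FIPS 180-2: append the 0x80 marker, zero-pad, and end
 * with the big-endian 64-bit message length in bits. block_nb is 2 when
 * fewer than 9 bytes of the current block remain free, 1 otherwise; the
 * digest is then serialized big-endian into ctx->buf.
 */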
uint8_t* avb_sha256_final(AvbSHA256Ctx* ctx) {
  size_t block_nb;
  size_t pm_len;
  uint64_t len_b;
#ifndef UNROLL_LOOPS
  size_t i;
#endif

  block_nb =
      (1 + ((AVB_SHA256_BLOCK_SIZE - 9) < (ctx->len % AVB_SHA256_BLOCK_SIZE)));

  len_b = (ctx->tot_len + ctx->len) << 3;
  pm_len = block_nb << 6;

  avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK64(len_b, ctx->block + pm_len - 8);

  SHA256_transform(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
  for (i = 0; i < 8; i++) {
    UNPACK32(ctx->h[i], &ctx->buf[i << 2]);
  }
#else
  UNPACK32(ctx->h[0], &ctx->buf[0]);
  UNPACK32(ctx->h[1], &ctx->buf[4]);
  UNPACK32(ctx->h[2], &ctx->buf[8]);
  UNPACK32(ctx->h[3], &ctx->buf[12]);
  UNPACK32(ctx->h[4], &ctx->buf[16]);
  UNPACK32(ctx->h[5], &ctx->buf[20]);
  UNPACK32(ctx->h[6], &ctx->buf[24]);
  UNPACK32(ctx->h[7], &ctx->buf[28]);
#endif /* !UNROLL_LOOPS */

  return ctx->buf;
}
#endif
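
/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 * a one-shot digest via the init/update/final triple. AVB_SHA256_DIGEST_SIZE
 * and avb_memcpy come from the headers included above; the
 * AVB_SHA256_USAGE_EXAMPLE guard is a hypothetical macro used here solely to
 * keep this sketch out of real builds. On the hardware-accelerated path the
 * caller would also set ctx.tot_len to the total message length before
 * avb_sha256_init().
 */
#ifdef AVB_SHA256_USAGE_EXAMPLE
static void avb_sha256_example(const uint8_t* msg,
                               size_t msg_len,
                               uint8_t out[AVB_SHA256_DIGEST_SIZE]) {
  AvbSHA256Ctx ctx;

  avb_sha256_init(&ctx);
  avb_sha256_update(&ctx, msg, msg_len); /* may be called repeatedly */
  avb_memcpy(out, avb_sha256_final(&ctx), AVB_SHA256_DIGEST_SIZE);
}
#endif /* AVB_SHA256_USAGE_EXAMPLE */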