/* SHA-256 and SHA-512 implementation based on code by Olivier Gay
 * <olivier.gay@a3.epfl.ch> under a BSD-style license. See below.
 */

/*
 * FIPS 180-2 SHA-224/256/384/512 implementation
 * Last update: 02/02/2007
 * Issue date: 04/30/2005
 *
 * Copyright (C) 2005, 2007 Olivier Gay <olivier.gay@a3.epfl.ch>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <android_avb/avb_sha.h>
#include <android_avb/avb_util.h>

#ifdef CONFIG_DM_CRYPTO
/* Hardware-backed path: delegate SHA-256 to a crypto device. The caller is
 * expected to have filled in ctx->tot_len beforehand, since the hardware
 * engine is given the overall length at init time. */
void avb_sha256_init(AvbSHA256Ctx* ctx) {
  ctx->crypto_ctx.algo = CRYPTO_SHA256;
  ctx->crypto_ctx.length = ctx->tot_len;
  memset(ctx->buf, 0, sizeof(ctx->buf));

  ctx->crypto_dev = crypto_get_device(ctx->crypto_ctx.algo);
  if (!ctx->crypto_dev)
    avb_error("Can't get sha256 crypto device\n");
  else
    crypto_sha_init(ctx->crypto_dev, &ctx->crypto_ctx);
}

void avb_sha256_update(AvbSHA256Ctx* ctx, const uint8_t* data, size_t len) {
  if (ctx->crypto_dev)
    crypto_sha_update(ctx->crypto_dev, (u32 *)data, len);
}

uint8_t* avb_sha256_final(AvbSHA256Ctx* ctx) {
  if (ctx->crypto_dev)
    crypto_sha_final(ctx->crypto_dev, &ctx->crypto_ctx, ctx->buf);

  return ctx->buf;
}
#else
#define SHFR(x, n) ((x) >> (n))
#define ROTR(x, n) (((x) >> (n)) | ((x) << ((sizeof(x) << 3) - (n))))
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x) << 3) - (n))))
#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))

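/*
 * SHA256_F1/SHA256_F2 are the Sigma0/Sigma1 functions applied to the working
 * variables in the compression rounds; SHA256_F3/SHA256_F4 are the
 * sigma0/sigma1 functions used to expand the message schedule
 * (FIPS 180-2, section 4.1.2).
 */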
#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3))
#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10))

#define UNPACK32(x, str)                 \
  {                                      \
    *((str) + 3) = (uint8_t)((x));       \
    *((str) + 2) = (uint8_t)((x) >> 8);  \
    *((str) + 1) = (uint8_t)((x) >> 16); \
    *((str) + 0) = (uint8_t)((x) >> 24); \
  }

#define UNPACK64(x, str)                           \
  {                                                \
    *((str) + 7) = (uint8_t)(x);                   \
    *((str) + 6) = (uint8_t)((uint64_t)(x) >> 8);  \
    *((str) + 5) = (uint8_t)((uint64_t)(x) >> 16); \
    *((str) + 4) = (uint8_t)((uint64_t)(x) >> 24); \
    *((str) + 3) = (uint8_t)((uint64_t)(x) >> 32); \
    *((str) + 2) = (uint8_t)((uint64_t)(x) >> 40); \
    *((str) + 1) = (uint8_t)((uint64_t)(x) >> 48); \
    *((str) + 0) = (uint8_t)((uint64_t)(x) >> 56); \
  }

#define PACK32(str, x)                                                \
  {                                                                   \
    *(x) = ((uint32_t)*((str) + 3)) | ((uint32_t)*((str) + 2) << 8) | \
           ((uint32_t)*((str) + 1) << 16) |                           \
           ((uint32_t)*((str) + 0) << 24);                            \
  }

/* Macros used for loop unrolling */

/* Message schedule expansion: computes w[i] for 16 <= i < 64. */
#define SHA256_SCR(i) \
  { w[i] = SHA256_F4(w[i - 2]) + w[i - 7] + SHA256_F3(w[i - 15]) + w[i - 16]; }

/* One compression round; the working variables a..h are renamed through the
 * macro arguments instead of being rotated through the wv[] array. */
#define SHA256_EXP(a, b, c, d, e, f, g, h, j)                                \
  {                                                                          \
    t1 = wv[h] + SHA256_F2(wv[e]) + CH(wv[e], wv[f], wv[g]) + sha256_k[j] +  \
         w[j];                                                               \
    t2 = SHA256_F1(wv[a]) + MAJ(wv[a], wv[b], wv[c]);                        \
    wv[d] += t1;                                                             \
    wv[h] = t1 + t2;                                                         \
  }
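
/*
 * Initial hash value: the first 32 bits of the fractional parts of the
 * square roots of the first eight primes (FIPS 180-2, section 5.3.2).
 */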
static const uint32_t sha256_h0[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372,
                                      0xa54ff53a, 0x510e527f, 0x9b05688c,
                                      0x1f83d9ab, 0x5be0cd19};

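/*
 * Round constants: the first 32 bits of the fractional parts of the cube
 * roots of the first 64 primes (FIPS 180-2, section 4.2.2).
 */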
static const uint32_t sha256_k[64] = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
    0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
    0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
    0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
    0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
    0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};

/* SHA-256 implementation */
void avb_sha256_init(AvbSHA256Ctx* ctx) {
#ifndef UNROLL_LOOPS
  int i;
  for (i = 0; i < 8; i++) {
    ctx->h[i] = sha256_h0[i];
  }
#else
  ctx->h[0] = sha256_h0[0];
  ctx->h[1] = sha256_h0[1];
  ctx->h[2] = sha256_h0[2];
  ctx->h[3] = sha256_h0[3];
  ctx->h[4] = sha256_h0[4];
  ctx->h[5] = sha256_h0[5];
  ctx->h[6] = sha256_h0[6];
  ctx->h[7] = sha256_h0[7];
#endif /* !UNROLL_LOOPS */

  ctx->len = 0;
  ctx->tot_len = 0;
}

/* Compress block_nb consecutive 64-byte blocks of the message into ctx->h. */
static void SHA256_transform(AvbSHA256Ctx* ctx,
                             const uint8_t* message,
                             size_t block_nb) {
  uint32_t w[64];
  uint32_t wv[8];
  uint32_t t1, t2;
  const unsigned char* sub_block;
  size_t i;

#ifndef UNROLL_LOOPS
  size_t j;
#endif

  for (i = 0; i < block_nb; i++) {
    sub_block = message + (i << 6);

#ifndef UNROLL_LOOPS
    /* Load the 16 message words big-endian, then expand them to 64. */
    for (j = 0; j < 16; j++) {
      PACK32(&sub_block[j << 2], &w[j]);
    }

    for (j = 16; j < 64; j++) {
      SHA256_SCR(j);
    }

    for (j = 0; j < 8; j++) {
      wv[j] = ctx->h[j];
    }

    /* 64 compression rounds. */
    for (j = 0; j < 64; j++) {
      t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha256_k[j] +
           w[j];
      t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]);
      wv[7] = wv[6];
      wv[6] = wv[5];
      wv[5] = wv[4];
      wv[4] = wv[3] + t1;
      wv[3] = wv[2];
      wv[2] = wv[1];
      wv[1] = wv[0];
      wv[0] = t1 + t2;
    }

    for (j = 0; j < 8; j++) {
      ctx->h[j] += wv[j];
    }
#else
    PACK32(&sub_block[0], &w[0]);
    PACK32(&sub_block[4], &w[1]);
    PACK32(&sub_block[8], &w[2]);
    PACK32(&sub_block[12], &w[3]);
    PACK32(&sub_block[16], &w[4]);
    PACK32(&sub_block[20], &w[5]);
    PACK32(&sub_block[24], &w[6]);
    PACK32(&sub_block[28], &w[7]);
    PACK32(&sub_block[32], &w[8]);
    PACK32(&sub_block[36], &w[9]);
    PACK32(&sub_block[40], &w[10]);
    PACK32(&sub_block[44], &w[11]);
    PACK32(&sub_block[48], &w[12]);
    PACK32(&sub_block[52], &w[13]);
    PACK32(&sub_block[56], &w[14]);
    PACK32(&sub_block[60], &w[15]);

    SHA256_SCR(16);
    SHA256_SCR(17);
    SHA256_SCR(18);
    SHA256_SCR(19);
    SHA256_SCR(20);
    SHA256_SCR(21);
    SHA256_SCR(22);
    SHA256_SCR(23);
    SHA256_SCR(24);
    SHA256_SCR(25);
    SHA256_SCR(26);
    SHA256_SCR(27);
    SHA256_SCR(28);
    SHA256_SCR(29);
    SHA256_SCR(30);
    SHA256_SCR(31);
    SHA256_SCR(32);
    SHA256_SCR(33);
    SHA256_SCR(34);
    SHA256_SCR(35);
    SHA256_SCR(36);
    SHA256_SCR(37);
    SHA256_SCR(38);
    SHA256_SCR(39);
    SHA256_SCR(40);
    SHA256_SCR(41);
    SHA256_SCR(42);
    SHA256_SCR(43);
    SHA256_SCR(44);
    SHA256_SCR(45);
    SHA256_SCR(46);
    SHA256_SCR(47);
    SHA256_SCR(48);
    SHA256_SCR(49);
    SHA256_SCR(50);
    SHA256_SCR(51);
    SHA256_SCR(52);
    SHA256_SCR(53);
    SHA256_SCR(54);
    SHA256_SCR(55);
    SHA256_SCR(56);
    SHA256_SCR(57);
    SHA256_SCR(58);
    SHA256_SCR(59);
    SHA256_SCR(60);
    SHA256_SCR(61);
    SHA256_SCR(62);
    SHA256_SCR(63);

    wv[0] = ctx->h[0];
    wv[1] = ctx->h[1];
    wv[2] = ctx->h[2];
    wv[3] = ctx->h[3];
    wv[4] = ctx->h[4];
    wv[5] = ctx->h[5];
    wv[6] = ctx->h[6];
    wv[7] = ctx->h[7];

    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 0);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 1);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 2);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 3);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 4);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 5);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 6);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 7);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 8);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 9);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 10);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 11);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 12);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 13);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 14);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 15);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 16);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 17);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 18);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 19);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 20);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 21);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 22);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 23);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 24);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 25);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 26);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 27);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 28);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 29);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 30);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 31);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 32);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 33);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 34);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 35);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 36);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 37);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 38);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 39);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 40);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 41);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 42);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 43);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 44);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 45);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 46);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 47);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 48);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 49);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 50);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 51);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 52);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 53);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 54);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 55);
    SHA256_EXP(0, 1, 2, 3, 4, 5, 6, 7, 56);
    SHA256_EXP(7, 0, 1, 2, 3, 4, 5, 6, 57);
    SHA256_EXP(6, 7, 0, 1, 2, 3, 4, 5, 58);
    SHA256_EXP(5, 6, 7, 0, 1, 2, 3, 4, 59);
    SHA256_EXP(4, 5, 6, 7, 0, 1, 2, 3, 60);
    SHA256_EXP(3, 4, 5, 6, 7, 0, 1, 2, 61);
    SHA256_EXP(2, 3, 4, 5, 6, 7, 0, 1, 62);
    SHA256_EXP(1, 2, 3, 4, 5, 6, 7, 0, 63);

    ctx->h[0] += wv[0];
    ctx->h[1] += wv[1];
    ctx->h[2] += wv[2];
    ctx->h[3] += wv[3];
    ctx->h[4] += wv[4];
    ctx->h[5] += wv[5];
    ctx->h[6] += wv[6];
    ctx->h[7] += wv[7];
#endif /* !UNROLL_LOOPS */
  }
}

void avb_sha256_update(AvbSHA256Ctx* ctx, const uint8_t* data, size_t len) {
  size_t block_nb;
  size_t new_len, rem_len, tmp_len;
  const uint8_t* shifted_data;

  /* Top up the partial block buffered from previous calls. */
  tmp_len = AVB_SHA256_BLOCK_SIZE - ctx->len;
  rem_len = len < tmp_len ? len : tmp_len;

  avb_memcpy(&ctx->block[ctx->len], data, rem_len);

  /* Still less than one full block: just buffer the data. */
  if (ctx->len + len < AVB_SHA256_BLOCK_SIZE) {
    ctx->len += len;
    return;
  }

  new_len = len - rem_len;
  block_nb = new_len / AVB_SHA256_BLOCK_SIZE;

  shifted_data = data + rem_len;

  /* Hash the completed buffer, then every full block of the new data. */
  SHA256_transform(ctx, ctx->block, 1);
  SHA256_transform(ctx, shifted_data, block_nb);

  /* Buffer whatever is left over for the next call. */
  rem_len = new_len % AVB_SHA256_BLOCK_SIZE;

  avb_memcpy(ctx->block, &shifted_data[block_nb << 6], rem_len);

  ctx->len = rem_len;
  ctx->tot_len += (block_nb + 1) << 6;
}
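
/*
 * Worked example of the buffering above: after avb_sha256_update() calls of
 * 40 and then 30 bytes, the first call only copies 40 bytes into ctx->block
 * (40 < 64, so no transform runs). The second call tops the block up with
 * 24 bytes, hashes that one full block, and leaves the remaining 6 bytes
 * buffered, so ctx->len == 6 and ctx->tot_len == 64.
 */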

uint8_t* avb_sha256_final(AvbSHA256Ctx* ctx) {
  size_t block_nb;
  size_t pm_len;
  uint64_t len_b;
#ifndef UNROLL_LOOPS
  size_t i;
#endif

  /* One padding block is enough unless fewer than 9 bytes remain for the
   * mandatory 0x80 byte plus the 64-bit length field. */
  block_nb =
      (1 + ((AVB_SHA256_BLOCK_SIZE - 9) < (ctx->len % AVB_SHA256_BLOCK_SIZE)));

  /* Total message length in bits, widened before the shift so it cannot
   * overflow size_t on 32-bit builds. */
  len_b = ((uint64_t)ctx->tot_len + ctx->len) << 3;
  pm_len = block_nb << 6;

  avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
  ctx->block[ctx->len] = 0x80;
  UNPACK64(len_b, ctx->block + pm_len - 8);

  SHA256_transform(ctx, ctx->block, block_nb);

  /* Serialize the state words big-endian into the output buffer. */
#ifndef UNROLL_LOOPS
  for (i = 0; i < 8; i++) {
    UNPACK32(ctx->h[i], &ctx->buf[i << 2]);
  }
#else
  UNPACK32(ctx->h[0], &ctx->buf[0]);
  UNPACK32(ctx->h[1], &ctx->buf[4]);
  UNPACK32(ctx->h[2], &ctx->buf[8]);
  UNPACK32(ctx->h[3], &ctx->buf[12]);
  UNPACK32(ctx->h[4], &ctx->buf[16]);
  UNPACK32(ctx->h[5], &ctx->buf[20]);
  UNPACK32(ctx->h[6], &ctx->buf[24]);
  UNPACK32(ctx->h[7], &ctx->buf[28]);
#endif /* !UNROLL_LOOPS */

  return ctx->buf;
}
#endif
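
/*
 * Usage sketch (a minimal illustration, not part of the library itself):
 * both the CONFIG_DM_CRYPTO and software paths expose the same three-call
 * streaming API. Hashing the string "abc" should produce the well-known
 * FIPS 180-2 test vector
 *   ba7816bf 8f01cfea 414140de 5dae2223 b00361a3 96177a9c b410ff61 f20015ad
 *
 *   AvbSHA256Ctx ctx;
 *   uint8_t* digest;
 *
 *   avb_sha256_init(&ctx);
 *   avb_sha256_update(&ctx, (const uint8_t*)"abc", 3);
 *   digest = avb_sha256_final(&ctx); // 32 big-endian bytes in ctx.buf
 */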