// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2021-2025, STMicroelectronics - All Rights Reserved
 */

#include <assert.h>
#include <config.h>
#include <drivers/clk_dt.h>
#include <drivers/clk.h>
#include <drivers/rstctrl.h>
#include <io.h>
#include <kernel/delay.h>
#include <kernel/dt.h>
#include <kernel/mutex.h>
#include <libfdt.h>
#include <stm32_util.h>
#include <utee_defines.h>
#include <util.h>

#include "common.h"
#include "stm32_hash.h"

/* HASH peripheral register offsets */
#define _HASH_CR			U(0x00)
#define _HASH_DIN			U(0x04)
#define _HASH_STR			U(0x08)
#define _HASH_IMR			U(0x20)
#define _HASH_SR			U(0x24)
#define _HASH_HR(x)			(U(0x310) + ((x) * U(0x04)))
#define _HASH_VERR			U(0x3F4)
#define _HASH_CSR(x)			(U(0xF8) + ((x) * U(0x04)))

/* Control Register */
#define _HASH_CR_INIT			BIT(2)
#define _HASH_CR_MODE			BIT(6)
#define _HASH_CR_DATATYPE_SHIFT		U(4)
#define _HASH_CR_DATATYPE_NONE		SHIFT_U32(U(0), _HASH_CR_DATATYPE_SHIFT)
#define _HASH_CR_DATATYPE_HALFWORD	SHIFT_U32(U(1), _HASH_CR_DATATYPE_SHIFT)
#define _HASH_CR_DATATYPE_BYTE		SHIFT_U32(U(2), _HASH_CR_DATATYPE_SHIFT)
#define _HASH_CR_DATATYPE_BIT		SHIFT_U32(U(3), _HASH_CR_DATATYPE_SHIFT)
#define _HASH_CR_LKEY			BIT(16)

/* Algorithm selection in the control register */
#define _HASH_CR_ALGO_SHIFT		U(17)
#define _HASH_CR_ALGO_MD5		BIT(7)
#define _HASH_CR_ALGO_SHA1		SHIFT_U32(U(0x0), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA224		SHIFT_U32(U(0x2), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA256		SHIFT_U32(U(0x3), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA256_IF_MD5	(BIT(18) | BIT(7))
#define _HASH_CR_ALGO_SHA384		SHIFT_U32(U(0xC), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA512_224	SHIFT_U32(U(0xD), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA512_256	SHIFT_U32(U(0xE), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA512		SHIFT_U32(U(0xF), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA3_224		SHIFT_U32(U(0x4), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA3_256		SHIFT_U32(U(0x5), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA3_384		SHIFT_U32(U(0x6), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHA3_512		SHIFT_U32(U(0x7), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHAKE128		SHIFT_U32(U(0x8), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_SHAKE256		SHIFT_U32(U(0x9), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_RAWSHAKE128	SHIFT_U32(U(0xA), _HASH_CR_ALGO_SHIFT)
#define _HASH_CR_ALGO_RAWSHAKE256	SHIFT_U32(U(0xB), _HASH_CR_ALGO_SHIFT)

/* Status Flags */
#define _HASH_SR_DINIS			BIT(0)
#define _HASH_SR_DCIS			BIT(1)
#define _HASH_SR_BUSY			BIT(3)
#define _HASH_SR_NBWP_MASK		GENMASK_32(13, 9)
#define _HASH_SR_NBWP_OFF		9
#define _HASH_SR_NBWE_MASK		GENMASK_32(21, 16)
#define _HASH_SR_NBWE_OFF		16

/* STR Register */
#define _HASH_STR_NBLW_MASK		GENMASK_32(4, 0)
#define _HASH_STR_DCAL			BIT(8)

/* _HASH_VERR bit fields */
#define _HASH_VERR_MINREV		GENMASK_32(3, 0)
#define _HASH_VERR_MAJREV		GENMASK_32(7, 4)

/* Digest size in nb of uint32_t */
#define MD5_DIGEST_U32			U(4)
#define SHA1_DIGEST_U32			U(5)
#define SHA224_DIGEST_U32		U(7)
#define SHA256_DIGEST_U32		U(8)
#define SHA384_DIGEST_U32		U(12)
#define SHA512_224_DIGEST_U32		U(7)
#define SHA512_256_DIGEST_U32		U(8)
#define SHA512_DIGEST_U32		U(16)
#define SHA3_224_DIGEST_U32		U(7)
#define SHA3_256_DIGEST_U32		U(8)
#define SHA3_384_DIGEST_U32		U(12)
#define SHA3_512_DIGEST_U32		U(16)

/* Internal block size */
#define MD5_BLOCK_SIZE			U(64)
#define SHA1_BLOCK_SIZE			U(64)
#define SHA224_BLOCK_SIZE		U(64)
#define SHA256_BLOCK_SIZE		U(64)
#define SHA384_BLOCK_SIZE		U(128)
#define SHA512_224_BLOCK_SIZE		U(128)
#define SHA512_256_BLOCK_SIZE		U(128)
#define SHA512_BLOCK_SIZE		U(128)
#define SHA3_224_BLOCK_SIZE		U(144)
#define SHA3_256_BLOCK_SIZE		U(136)
#define SHA3_384_BLOCK_SIZE		U(104)
#define SHA3_512_BLOCK_SIZE		U(72)

/* Define the registers needed
to save context */ 107 #define SAVE_SMALL U(1) 108 #define SAVE_BIG U(2) 109 #define SAVE_SHA3 U(3) 110 111 #define SAVE_SMALL_NB_REG U(22) 112 #define SAVE_SMALL_FIRST_REG U(0) 113 #define SAVE_SMALL_HMAC_NB_REG U(16) 114 #define SAVE_SMALL_HMAC_FIRST_REG U(38) 115 #define SAVE_BIG_NB_REG U(91) 116 #define SAVE_BIG_FIRST_REG U(0) 117 #define SAVE_BIG_HMAC_NB_REG U(12) 118 #define SAVE_BIG_HMAC_FIRST_REG U(91) 119 #define SAVE_SHA3_NB_REG U(72) 120 #define SAVE_SHA3_FIRST_REG U(0) 121 #define SAVE_SHA3_HMAC_NB_REG U(72) 122 #define SAVE_SHA3_HMAC_FIRST_REG U(16) 123 124 #define RESET_TIMEOUT_US_1MS U(1000) 125 #define HASH_TIMEOUT_US U(10000) 126 127 /* Define capabilities */ 128 #define CAPS_MD5 BIT(0) 129 #define CAPS_SHA1 BIT(1) 130 #define CAPS_SHA2_224 BIT(2) 131 #define CAPS_SHA2_256 BIT(3) 132 #define CAPS_SHA2_384 BIT(4) 133 #define CAPS_SHA2_512 BIT(5) 134 #define CAPS_SHA3 BIT(6) 135 136 struct stm32_hash_compat { 137 uint32_t caps; 138 }; 139 140 struct stm32_hash_platdata { 141 vaddr_t base; 142 struct clk *clock; 143 struct rstctrl *reset; 144 struct stm32_hash_compat *compat; 145 }; 146 147 struct stm32_hash_device { 148 struct stm32_hash_platdata pdata; 149 struct mutex lock; /* Protect HASH HW instance access */ 150 }; 151 152 static struct stm32_hash_device *stm32_hash; 153 154 static TEE_Result wait_end_busy(vaddr_t base) 155 { 156 uint32_t value = 0; 157 uint32_t addr = base + _HASH_SR; 158 159 /* Timeout may append due to a schedule after the while(timeout()) */ 160 if (IO_READ32_POLL_TIMEOUT(addr, value, !(value & _HASH_SR_BUSY), 0, 161 HASH_TIMEOUT_US)) { 162 EMSG("Busy timeout"); 163 return TEE_ERROR_BUSY; 164 } 165 166 return TEE_SUCCESS; 167 } 168 169 static int wait_digest_ready(vaddr_t base) 170 { 171 uint32_t value = 0; 172 uint32_t addr = base + _HASH_SR; 173 174 /* Timeout may append due to a schedule after the while(test) */ 175 if (IO_READ32_POLL_TIMEOUT(addr, value, value & _HASH_SR_DCIS, 0, 176 HASH_TIMEOUT_US)) { 177 EMSG("Ready 
timeout"); 178 return TEE_ERROR_BUSY; 179 } 180 181 return TEE_SUCCESS; 182 } 183 184 static TEE_Result hash_write_data(vaddr_t base, uint32_t data) 185 { 186 io_write32(base + _HASH_DIN, data); 187 188 return wait_end_busy(base); 189 } 190 191 static TEE_Result write_key(vaddr_t base, const uint8_t *key, size_t len) 192 { 193 TEE_Result res = TEE_ERROR_GENERIC; 194 uint32_t tmp_buf = 0; 195 196 io_clrsetbits32(base + _HASH_STR, _HASH_STR_NBLW_MASK, 197 8 * (len % sizeof(uint32_t))); 198 199 while (len / sizeof(uint32_t)) { 200 memcpy(&tmp_buf, key, sizeof(uint32_t)); 201 res = hash_write_data(base, tmp_buf); 202 if (res) 203 return res; 204 205 key += sizeof(uint32_t); 206 len -= sizeof(uint32_t); 207 } 208 209 if (len) { 210 tmp_buf = 0; 211 memcpy(&tmp_buf, key, len); 212 res = hash_write_data(base, tmp_buf); 213 if (res) 214 return res; 215 } 216 217 io_setbits32(base + _HASH_STR, _HASH_STR_DCAL); 218 219 return TEE_SUCCESS; 220 } 221 222 static void get_save_registers(struct stm32_hash_context *c, size_t *nb_regs, 223 size_t *first, size_t *hmac_nb_regs, 224 size_t *hmac_first) 225 { 226 switch (c->save_mode) { 227 case SAVE_SMALL: 228 *nb_regs = SAVE_SMALL_NB_REG; 229 *first = SAVE_SMALL_FIRST_REG; 230 if (c->mode == STM32_HMAC_MODE) { 231 *hmac_nb_regs = SAVE_SMALL_HMAC_NB_REG; 232 *hmac_first = SAVE_SMALL_HMAC_FIRST_REG; 233 } 234 break; 235 236 case SAVE_BIG: 237 *nb_regs = SAVE_BIG_NB_REG; 238 *first = SAVE_BIG_FIRST_REG; 239 if (c->mode == STM32_HMAC_MODE) { 240 *hmac_nb_regs = SAVE_BIG_HMAC_NB_REG; 241 *hmac_first = SAVE_BIG_HMAC_FIRST_REG; 242 } 243 break; 244 245 case SAVE_SHA3: 246 *nb_regs = SAVE_SHA3_NB_REG; 247 *first = SAVE_SHA3_FIRST_REG; 248 if (c->mode == STM32_HMAC_MODE) { 249 *hmac_nb_regs = SAVE_SHA3_HMAC_NB_REG; 250 *hmac_first = SAVE_SHA3_HMAC_FIRST_REG; 251 } 252 break; 253 254 default: 255 break; 256 } 257 } 258 259 static TEE_Result save_context(struct stm32_hash_context *c) 260 { 261 TEE_Result res = TEE_ERROR_GENERIC; 262 size_t i = 
	    0;
	size_t nb_reg = 0;
	size_t first = 0;
	size_t hmac_nb_reg = 0;
	size_t hmac_first = 0;
	vaddr_t base = c->dev->pdata.base;

	res = wait_end_busy(base);
	if (res)
		return res;

	/* Check that FIFO is empty */
	if (!(io_read32(base + _HASH_SR) & _HASH_SR_DINIS))
		return TEE_ERROR_BAD_STATE;

	/* Snapshot interrupt mask, start and control registers */
	c->imr = io_read32(base + _HASH_IMR);
	c->str = io_read32(base + _HASH_STR);
	c->cr = io_read32(base + _HASH_CR);

	get_save_registers(c, &nb_reg, &first, &hmac_nb_reg, &hmac_first);

	if (!c->csr)
		return TEE_ERROR_BAD_STATE;

	/* Save context registers */
	for (i = 0; i < nb_reg; i++)
		c->csr[i] = io_read32(base + _HASH_CSR(i + first));
	/* Save HMAC context registers */
	for (i = 0 ; i < hmac_nb_reg; i++)
		c->csr[i + nb_reg] = io_read32(base + _HASH_CSR(i +
								hmac_first));

	return TEE_SUCCESS;
}

/*
 * Restore a previously saved HASH IP state into the hardware.
 * Writing CR with _HASH_CR_INIT re-initializes the IP before the CSR
 * registers are reloaded.
 */
static TEE_Result restore_context(struct stm32_hash_context *c)
{
	size_t i = 0;
	size_t nb_reg = 0;
	size_t first = 0;
	size_t hmac_nb_reg = 0;
	size_t hmac_first = 0;
	vaddr_t base = c->dev->pdata.base;

	io_write32(base + _HASH_IMR, c->imr);
	io_write32(base + _HASH_STR, c->str);
	io_write32(base + _HASH_CR, c->cr | _HASH_CR_INIT);

	get_save_registers(c, &nb_reg, &first, &hmac_nb_reg, &hmac_first);

	if (!c->csr)
		return TEE_ERROR_BAD_STATE;

	/* Restore context registers */
	for (i = 0; i < nb_reg; i++)
		io_write32(base + _HASH_CSR(i + first), c->csr[i]);

	/* Restore HMAC context registers */
	for (i = 0 ; i < hmac_nb_reg; i++)
		io_write32(base + _HASH_CSR(i + hmac_first),
			   c->csr[i + nb_reg]);

	return TEE_SUCCESS;
}

/*
 * Program the control register for the context's algorithm and mode and
 * start a new computation. In HMAC mode the key is written to the FIFO;
 * keys longer than the block size select the long-key (LKEY) scheme.
 */
static TEE_Result hw_init(struct stm32_hash_context *c, const uint8_t *key,
			  size_t len)
{
	uint32_t reg_cr = 0;
	vaddr_t base = c->dev->pdata.base;

	reg_cr = _HASH_CR_INIT | _HASH_CR_DATATYPE_BYTE;

	switch (c->algo) {
	case STM32_HASH_MD5:
		reg_cr |= _HASH_CR_ALGO_MD5;
		break;
	case STM32_HASH_SHA1:
		reg_cr |= _HASH_CR_ALGO_SHA1;
		break;
	case STM32_HASH_SHA224:
		reg_cr |= _HASH_CR_ALGO_SHA224;
		break;
	case STM32_HASH_SHA384:
		reg_cr |= _HASH_CR_ALGO_SHA384;
		break;
	case STM32_HASH_SHA512:
		reg_cr |= _HASH_CR_ALGO_SHA512;
		break;
	case STM32_HASH_SHA3_224:
		reg_cr |= _HASH_CR_ALGO_SHA3_224;
		break;
	case STM32_HASH_SHA3_256:
		reg_cr |= _HASH_CR_ALGO_SHA3_256;
		break;
	case STM32_HASH_SHA3_384:
		reg_cr |= _HASH_CR_ALGO_SHA3_384;
		break;
	case STM32_HASH_SHA3_512:
		reg_cr |= _HASH_CR_ALGO_SHA3_512;
		break;
	/* Default selected algo is SHA256 */
	case STM32_HASH_SHA256:
		/*
		 * On IP versions that also support MD5, SHA256 uses a
		 * different algorithm encoding.
		 */
		if (c->dev->pdata.compat->caps & CAPS_MD5)
			reg_cr |= _HASH_CR_ALGO_SHA256_IF_MD5;
		else
			reg_cr |= _HASH_CR_ALGO_SHA256;

		break;
	default:
		return TEE_ERROR_BAD_STATE;
	}

	if (c->mode == STM32_HMAC_MODE) {
		reg_cr |= _HASH_CR_MODE;

		if (len > c->block_size)
			reg_cr |= _HASH_CR_LKEY;

		io_write32(base + _HASH_CR, reg_cr);

		return write_key(base, key, len);
	}

	io_write32(base + _HASH_CR, reg_cr);

	return TEE_SUCCESS;
}

/*
 * Read the computed digest from the HR registers into @digest.
 * Hardware words are big-endian and converted to CPU order before copy.
 */
static TEE_Result hash_get_digest(struct stm32_hash_context *c, uint8_t *digest)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	vaddr_t base = c->dev->pdata.base;
	uint32_t i = 0;
	uint32_t dsg = 0;

	res = wait_digest_ready(base);
	if (res)
		return res;

	for (i = 0; i < c->digest_u32; i++) {
		dsg = TEE_U32_FROM_BIG_ENDIAN(io_read32(base + _HASH_HR(i)));
		memcpy(digest + (i * sizeof(uint32_t)), &dsg, sizeof(uint32_t));
	}

	return TEE_SUCCESS;
}

/* Return the digest size in bytes for context @c */
size_t stm32_hash_digest_size(struct stm32_hash_context *c)
{
	assert(c);

	return c->digest_u32 * sizeof(uint32_t);
}

/*
 * Deep-copy context @src into @dst. Both contexts must already be
 * allocated for the same mode and algorithm: @dst keeps its own buffers
 * and receives a copy of @src's state and buffered data.
 */
TEE_Result stm32_hash_deep_copy(struct stm32_hash_context *dst,
				struct stm32_hash_context *src)
{
	size_t nb_reg = 0;
	size_t first = 0;
	size_t hmac_nb_reg = 0;
	size_t hmac_first = 0;
	uint32_t *dst_buf = NULL;
	uint32_t *dst_csr = NULL;

	if (!dst || !src || dst->mode != src->mode || dst->algo != src->algo)
		return TEE_ERROR_BAD_PARAMETERS;

	/* Preserve @dst's own allocations across the struct copy */
	dst_buf = dst->remain.buf;
	dst_csr = dst->csr;
	*dst = *src;
	dst->remain.buf = dst_buf;
	dst->csr = dst_csr;

	memcpy(dst->remain.buf, src->remain.buf, dst->remain.len);
	get_save_registers(dst, &nb_reg, &first, &hmac_nb_reg, &hmac_first);
	memcpy(dst->csr, src->csr, (nb_reg + hmac_nb_reg) * sizeof(uint32_t));

	return TEE_SUCCESS;
}

/*
 * Initialize context @c for @mode/@algo and allocate its buffers
 * (remaining-data queue and CSR save area). Fails with
 * TEE_ERROR_NOT_IMPLEMENTED when the algorithm is not in the
 * device capabilities or the driver is not probed.
 */
TEE_Result stm32_hash_alloc(struct stm32_hash_context *c,
			    enum stm32_hash_mode mode,
			    enum stm32_hash_algo algo)
{
	size_t nb_reg = 0;
	size_t first = 0;
	size_t hmac_nb_reg = 0;
	size_t hmac_first = 0;

	assert(c);

	/* Check if initialized */
	if (!stm32_hash)
		return TEE_ERROR_NOT_IMPLEMENTED;

	c->dev = stm32_hash;
	c->mode = mode;
	c->algo = algo;

	switch (algo) {
	case STM32_HASH_MD5:
		if (!(c->dev->pdata.compat->caps & CAPS_MD5))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = MD5_DIGEST_U32;
		c->block_size = MD5_BLOCK_SIZE;
		c->save_mode = SAVE_SMALL;
		break;
	case STM32_HASH_SHA1:
		if (!(c->dev->pdata.compat->caps & CAPS_SHA1))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = SHA1_DIGEST_U32;
		c->block_size = SHA1_BLOCK_SIZE;
		c->save_mode = SAVE_SMALL;
		break;
	case STM32_HASH_SHA224:
		if (!(c->dev->pdata.compat->caps & CAPS_SHA2_224))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = SHA224_DIGEST_U32;
		c->block_size = SHA224_BLOCK_SIZE;
		c->save_mode = SAVE_SMALL;
		break;
	case STM32_HASH_SHA256:
		if (!(c->dev->pdata.compat->caps & CAPS_SHA2_256))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = SHA256_DIGEST_U32;
		c->block_size
		             = SHA256_BLOCK_SIZE;
		c->save_mode = SAVE_SMALL;
		break;
	case STM32_HASH_SHA384:
		if (!(c->dev->pdata.compat->caps & CAPS_SHA2_384))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = SHA384_DIGEST_U32;
		c->block_size = SHA384_BLOCK_SIZE;
		c->save_mode = SAVE_BIG;
		break;
	case STM32_HASH_SHA512:
		if (!(c->dev->pdata.compat->caps & CAPS_SHA2_512))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = SHA512_DIGEST_U32;
		c->block_size = SHA512_BLOCK_SIZE;
		c->save_mode = SAVE_BIG;
		break;
	case STM32_HASH_SHA3_224:
		if (!(c->dev->pdata.compat->caps & CAPS_SHA3))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = SHA3_224_DIGEST_U32;
		c->block_size = SHA3_224_BLOCK_SIZE;
		c->save_mode = SAVE_SHA3;
		break;
	case STM32_HASH_SHA3_256:
		if (!(c->dev->pdata.compat->caps & CAPS_SHA3))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = SHA3_256_DIGEST_U32;
		c->block_size = SHA3_256_BLOCK_SIZE;
		c->save_mode = SAVE_SHA3;
		break;
	case STM32_HASH_SHA3_384:
		if (!(c->dev->pdata.compat->caps & CAPS_SHA3))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = SHA3_384_DIGEST_U32;
		c->block_size = SHA3_384_BLOCK_SIZE;
		c->save_mode = SAVE_SHA3;
		break;
	case STM32_HASH_SHA3_512:
		if (!(c->dev->pdata.compat->caps & CAPS_SHA3))
			return TEE_ERROR_NOT_IMPLEMENTED;

		c->digest_u32 = SHA3_512_DIGEST_U32;
		c->block_size = SHA3_512_BLOCK_SIZE;
		c->save_mode = SAVE_SHA3;
		break;
	default:
		return TEE_ERROR_NOT_IMPLEMENTED;
	}

	/*
	 * The queue size is block_size + one register at first
	 * then block_size.
	 * So we may need to save at max queue_size + 3 bytes.
	 * Let allocate a number of uint32_t: queue_size + 4.
	 */
	c->remain.buf = calloc(c->block_size + sizeof(uint32_t), 1);
	if (!c->remain.buf)
		return TEE_ERROR_OUT_OF_MEMORY;

	get_save_registers(c, &nb_reg, &first, &hmac_nb_reg, &hmac_first);

	c->csr = calloc(nb_reg + hmac_nb_reg, sizeof(uint32_t));
	if (!c->csr) {
		free(c->remain.buf);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	return TEE_SUCCESS;
}

/* Release the buffers allocated by stm32_hash_alloc() (context not reset) */
void stm32_hash_free(struct stm32_hash_context *c)
{
	if (!c)
		return;

	free(c->remain.buf);
	free(c->csr);
}

/*
 * Feed @len bytes of @buffer into the ongoing hash computation.
 * Data that cannot yet fill the hardware FIFO is buffered in
 * c->remain.buf; otherwise the IP state is restored, data is streamed
 * word by word, and the state is saved again before returning.
 */
TEE_Result stm32_hash_update(struct stm32_hash_context *c,
			     const uint8_t *buffer, size_t len)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	size_t next_queue_size = c->queue_size;
	vaddr_t base = 0;

	assert(c);

	base = c->dev->pdata.base;

	if (!len || !buffer)
		return TEE_SUCCESS;

	mutex_lock(&c->dev->lock);
	if (clk_enable(c->dev->pdata.clock)) {
		EMSG("Fail to enable clk %s",
		     clk_get_name(c->dev->pdata.clock));
		panic();
	}

	res = restore_context(c);
	if (res)
		goto exit;

	/* We cannot fill the fifo */
	if (c->remain.len + len < c->queue_size) {
		if (!c->remain.buf) {
			res = TEE_ERROR_BAD_STATE;
			goto exit;
		}

		memcpy(((uint8_t *)c->remain.buf) + c->remain.len, buffer, len);
		c->remain.len += len;

		/*
		 * We don't need to save status as we didn't change IP
		 * internal state.
		 */
		goto exit;
	} else {
		/* After the first full queue, the queue is one block */
		next_queue_size = c->block_size;
	}

	/* First write data saved in previous update */
	if (c->remain.len) {
		size_t align = 0;
		size_t i = 0;

		if (!c->remain.buf) {
			res = TEE_ERROR_BAD_STATE;
			goto exit;
		}

		/* Add bytes needed to align saved data */
		align = ROUNDUP(c->remain.len, sizeof(uint32_t)) -
			c->remain.len;
		memcpy(((uint8_t *)c->remain.buf) + c->remain.len, buffer,
		       align);
		c->remain.len += align;
		buffer += align;
		len -= align;

		for (i = 0; i < c->remain.len / sizeof(uint32_t); i++) {
			res = hash_write_data(base, c->remain.buf[i]);
			if (res)
				goto exit;

			c->remain.buf[i] = 0; /* Reset to 0 */
		}

		/* No more saved data */
		c->remain.len = 0;
	}

	/*
	 * Here, the data should be written to the FIFO until we cannot
	 * guarantee anymore that the data that we write will trigger a
	 * process of data. Then we write the remaining data until DINIS
	 * is set to 1 by hardware, meaning that a complete block can be
	 * sent. Data written will be saved during save_context() and
	 * remaining data not written (if there's any) will be saved in
	 * c->remain.buf.
	 */
	while (len >= c->queue_size ||
	       !(io_read32(base + _HASH_SR) & _HASH_SR_DINIS)) {
		uint32_t tmp_buf = 0;

		memcpy(&tmp_buf, buffer, sizeof(uint32_t));
		res = hash_write_data(base, tmp_buf);
		if (res)
			goto exit;

		buffer += sizeof(uint32_t);
		len -= sizeof(uint32_t);
	}

	c->queue_size = next_queue_size;

	if (len) {
		assert(c->remain.len == 0);

		if (!c->remain.buf) {
			res = TEE_ERROR_BAD_STATE;
			goto exit;
		}

		memcpy(c->remain.buf, buffer, len);
		c->remain.len = len;
	}

	res = save_context(c);

exit:
	clk_disable(c->dev->pdata.clock);
	mutex_unlock(&c->dev->lock);

	return res;
}

/*
 * Terminate the computation and read the digest into @digest.
 * In HMAC mode the key (@key/@len) is written again for the final
 * HMAC round; it is mandatory in that mode.
 */
TEE_Result stm32_hash_final(struct stm32_hash_context *c, uint8_t *digest,
			    const uint8_t *key, size_t len)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	vaddr_t base = 0;

	assert(c);

	base = c->dev->pdata.base;

	if ((!key || !len) && c->mode != STM32_HASH_MODE)
		return TEE_ERROR_BAD_STATE;

	mutex_lock(&c->dev->lock);
	if (clk_enable(c->dev->pdata.clock)) {
		EMSG("Fail to enable clk %s",
		     clk_get_name(c->dev->pdata.clock));
		panic();
	}

	res = restore_context(c);
	if (res)
		goto exit;

	if (c->remain.len) {
		size_t i = 0;

		/* Flush the buffered words, then program valid bit count */
		for (i = 0;
		     i < ROUNDUP_DIV(c->remain.len, sizeof(uint32_t));
		     i++) {
			res = hash_write_data(base, c->remain.buf[i]);
			if (res)
				goto exit;
			c->remain.buf[i] = 0; /* Reset to 0 */
		}

		io_clrsetbits32(base + _HASH_STR, _HASH_STR_NBLW_MASK,
				8 * (c->remain.len % sizeof(uint32_t)));

		/* No more saved data */
		c->remain.len = 0;
	} else {
		io_clrbits32(base + _HASH_STR, _HASH_STR_NBLW_MASK);
	}

	/* Start digest calculation */
	io_setbits32(base + _HASH_STR, _HASH_STR_DCAL);

	if (c->mode == STM32_HMAC_MODE) {
		res = write_key(base, key, len);
		if (res)
			goto exit;
	}

	res = hash_get_digest(c, digest);

exit:
clk_disable(c->dev->pdata.clock); 754 mutex_unlock(&c->dev->lock); 755 756 return res; 757 } 758 759 TEE_Result stm32_hash_init(struct stm32_hash_context *c, const uint8_t *key, 760 size_t len) 761 { 762 TEE_Result res = TEE_ERROR_GENERIC; 763 764 assert(c); 765 766 if ((!key || !len) && c->mode != STM32_HASH_MODE) 767 return TEE_ERROR_BAD_PARAMETERS; 768 769 mutex_lock(&c->dev->lock); 770 771 if (clk_enable(c->dev->pdata.clock)) { 772 EMSG("Fail to enable clk %s", 773 clk_get_name(c->dev->pdata.clock)); 774 panic(); 775 } 776 777 c->remain.len = 0; 778 /* First queue is block_size + one register */ 779 c->queue_size = c->block_size + sizeof(uint32_t); 780 memset(c->remain.buf, 0, c->queue_size); 781 782 res = hw_init(c, key, len); 783 if (res) 784 goto exit; 785 786 res = save_context(c); 787 788 exit: 789 clk_disable(c->dev->pdata.clock); 790 mutex_unlock(&c->dev->lock); 791 792 return res; 793 } 794 795 static TEE_Result stm32_hash_parse_fdt(struct stm32_hash_platdata *pdata, 796 const void *fdt, int node, 797 const void *compat_data) 798 { 799 TEE_Result res = TEE_ERROR_GENERIC; 800 size_t reg_size = 0; 801 paddr_t reg = 0; 802 803 res = rstctrl_dt_get_by_index(fdt, node, 0, &pdata->reset); 804 if (res != TEE_SUCCESS && res != TEE_ERROR_ITEM_NOT_FOUND) 805 return res; 806 807 res = clk_dt_get_by_index(fdt, node, 0, &pdata->clock); 808 if (res) 809 return res; 810 811 res = fdt_reg_info(fdt, node, ®, ®_size); 812 if (res) 813 return res; 814 815 pdata->base = (vaddr_t)phys_to_virt(reg, MEM_AREA_IO_SEC, reg_size); 816 if (!pdata->base) 817 panic(); 818 819 pdata->compat = (struct stm32_hash_compat *)compat_data; 820 821 return TEE_SUCCESS; 822 } 823 824 static TEE_Result stm32_hash_probe(const void *fdt, int node, 825 const void *compat_data) 826 { 827 TEE_Result res = TEE_ERROR_GENERIC; 828 uint32_t __maybe_unused rev = 0; 829 struct stm32_hash_platdata temp_pdata = { }; 830 831 res = stm32_hash_parse_fdt(&temp_pdata, fdt, node, compat_data); 832 if (res) 833 
return res; 834 835 stm32_hash = calloc(1, sizeof(*stm32_hash)); 836 if (!stm32_hash) 837 return TEE_ERROR_OUT_OF_MEMORY; 838 839 stm32_hash->pdata = temp_pdata; 840 841 if (clk_enable(stm32_hash->pdata.clock)) { 842 EMSG("Fail to enable clk %s", 843 clk_get_name(stm32_hash->pdata.clock)); 844 panic(); 845 } 846 847 rev = io_read32(stm32_hash->pdata.base + _HASH_VERR); 848 FMSG("STM32 HASH v%"PRIu32".%"PRIu32, (rev & _HASH_VERR_MAJREV) >> 4, 849 rev & _HASH_VERR_MINREV); 850 851 if (stm32_hash->pdata.reset && 852 rstctrl_assert_to(stm32_hash->pdata.reset, RESET_TIMEOUT_US_1MS)) 853 panic(); 854 855 if (stm32_hash->pdata.reset && 856 rstctrl_deassert_to(stm32_hash->pdata.reset, RESET_TIMEOUT_US_1MS)) 857 panic(); 858 859 mutex_init(&stm32_hash->lock); 860 861 clk_disable(stm32_hash->pdata.clock); 862 863 if (IS_ENABLED(CFG_CRYPTO_DRV_HASH)) { 864 res = stm32_register_hash(); 865 if (res) { 866 EMSG("Failed to register to HASH: %#"PRIx32, res); 867 panic(); 868 } 869 } 870 871 if (IS_ENABLED(CFG_CRYPTO_DRV_MAC)) { 872 res = stm32_register_hmac(); 873 if (res) { 874 EMSG("Failed to register to HMAC : %#"PRIx32, res); 875 panic(); 876 } 877 } 878 879 return TEE_SUCCESS; 880 } 881 882 static const struct stm32_hash_compat mp13_compat = { 883 .caps = CAPS_SHA1 | CAPS_SHA2_224 | CAPS_SHA2_256 | CAPS_SHA2_384 | 884 CAPS_SHA2_512 | CAPS_SHA3, 885 }; 886 887 static const struct stm32_hash_compat mp15_compat = { 888 .caps = CAPS_MD5 | CAPS_SHA1 | CAPS_SHA2_224 | CAPS_SHA2_256, 889 }; 890 891 static const struct dt_device_match hash_match_table[] = { 892 { .compatible = "st,stm32mp13-hash", .compat_data = &mp13_compat }, 893 { .compatible = "st,stm32f756-hash", .compat_data = &mp15_compat }, 894 { } 895 }; 896 897 DEFINE_DT_DRIVER(stm32_hash_dt_driver) = { 898 .name = "stm32-hash", 899 .match_table = hash_match_table, 900 .probe = &stm32_hash_probe, 901 }; 902