// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2024 HiSilicon Limited.
 * Kunpeng hardware accelerator sec hash algorithm implementation.
 */

#include <drvcrypt_hash.h>
#include <initcall.h>
#include <sec_hash.h>
#include <sec_main.h>

static enum hisi_drv_status sec_digest_set_hmac_key(struct hashctx *ctx,
						    struct hisi_sec_sqe *sqe)
{
	if (ctx->key_len > SEC_DIGEST_MAX_KEY_SIZE || !ctx->key_len) {
		EMSG("Invalid digest key len(%ld)", ctx->key_len);
		return HISI_QM_DRVCRYPT_IN_EPARA;
	}

	/* If the length of key is not word-aligned, increment by 1 */
	sqe->type2.mac_key_alg |= SHIFT_U64(DIV_ROUND_UP(ctx->key_len,
							 SEC_ENCODE_BYTES),
					    SEC_AKEY_OFFSET);
	sqe->type2.a_key_addr = ctx->key_dma;

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static void sec_digest_fill_long_bd2(struct hashctx *ctx,
				     struct hisi_sec_sqe *sqe)
{
	uint64_t total_bits = 0;

	if (ctx->has_next && !ctx->iv_len) {
		/* LONG BD FIRST */
		sqe->ai_apd_cs |= AI_GEN_INNER;
		sqe->ai_apd_cs |= SHIFT_U32(AUTHPAD_NOPAD, SEC_APAD_OFFSET);
		ctx->iv_len = ctx->mac_len;
	} else if (ctx->has_next && ctx->iv_len) {
		/* LONG BD MIDDLE */
		sqe->ai_apd_cs |= AI_GEN_IVIN_ADDR;
		sqe->ai_apd_cs |= SHIFT_U32(AUTHPAD_NOPAD, SEC_APAD_OFFSET);
		sqe->type2.a_ivin_addr = sqe->type2.mac_addr;
		ctx->iv_len = ctx->mac_len;
	} else if (!ctx->has_next && ctx->iv_len) {
		/* LONG BD END */
		sqe->ai_apd_cs |= AI_GEN_IVIN_ADDR;
		sqe->ai_apd_cs |= SHIFT_U32(AUTHPAD_PAD, SEC_APAD_OFFSET);
		sqe->type2.a_ivin_addr = sqe->type2.mac_addr;
		total_bits = ctx->long_data_len * BYTE_BITS;
		sqe->type2.long_a_data_len = total_bits;
		ctx->iv_len = 0;
	} else {
		/* SHORT BD */
		ctx->iv_len = 0;
	}
}

static struct crypto_hash *to_hash_ctx(struct crypto_hash_ctx *ctx)
{
	return container_of(ctx, struct crypto_hash, hash_ctx);
}

static uint32_t sec_digest_get_alg_type(uint32_t algo)
{
	switch (algo) {
	case TEE_ALG_MD5:
		return A_ALG_MD5;
	case TEE_ALG_HMAC_MD5:
		return A_ALG_HMAC_MD5;
	case TEE_ALG_SHA1:
		return A_ALG_SHA1;
	case TEE_ALG_HMAC_SHA1:
		return A_ALG_HMAC_SHA1;
	case TEE_ALG_SHA224:
		return A_ALG_SHA224;
	case TEE_ALG_HMAC_SHA224:
		return A_ALG_HMAC_SHA224;
	case TEE_ALG_SM3:
		return A_ALG_SM3;
	case TEE_ALG_HMAC_SM3:
		return A_ALG_HMAC_SM3;
	case TEE_ALG_SHA256:
		return A_ALG_SHA256;
	case TEE_ALG_HMAC_SHA256:
		return A_ALG_HMAC_SHA256;
	case TEE_ALG_SHA384:
		return A_ALG_SHA384;
	case TEE_ALG_HMAC_SHA384:
		return A_ALG_HMAC_SHA384;
	case TEE_ALG_SHA512:
		return A_ALG_SHA512;
	case TEE_ALG_HMAC_SHA512:
		return A_ALG_HMAC_SHA512;
	default:
		return A_ALG_MAX;
	}
}

static enum hisi_drv_status sec_digest_fill_sqe(void *bd, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_sec_sqe *sqe = bd;
	struct hashctx *ctx = msg;
	uint32_t alg_type = 0;

	if (!ctx->in_len) {
		EMSG("Digest bd2 not support 0 packet");
		return HISI_QM_DRVCRYPT_IN_EPARA;
	}

	sqe->type_auth_cipher = BD_TYPE2;
	sqe->sds_sa_type = SHIFT_U32(SCENE_NOTHING, SEC_SCENE_OFFSET);
	sqe->type_auth_cipher |= SHIFT_U32(AUTH_MAC_CALCULATE, SEC_AUTH_OFFSET);
	sqe->type2.alen_ivllen = ctx->in_len;

	sqe->type2.data_src_addr = ctx->in_dma;
	sqe->type2.mac_addr = ctx->out_dma;
	sqe->type2.mac_key_alg |= ctx->mac_len / SEC_ENCODE_BYTES;

	if (ctx->mode == WCRYPTO_DIGEST_HMAC) {
		ret = sec_digest_set_hmac_key(ctx, sqe);
		if (ret)
			return ret;
	}

	alg_type = sec_digest_get_alg_type(ctx->algo);
	if (alg_type >= A_ALG_MAX) {
		EMSG("Fail to get digest alg type");
		return HISI_QM_DRVCRYPT_IN_EPARA;
	}
	sqe->type2.mac_key_alg |= SHIFT_U32(alg_type, SEC_AEAD_ALG_OFFSET);

	sec_digest_fill_long_bd2(ctx, sqe);

	return ret;
}
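
/*
 * The BD3 (type 3) descriptor path below mirrors the BD2 path above:
 * set the HMAC key, chain long-hash fragments and fill the SQE. It is
 * selected in hisi_sec_hash_ctx_init() for queue hardware newer than
 * HISI_QM_HW_V2.
 */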
static enum hisi_drv_status
sec_digest_set_hmac_bd3_key(struct hashctx *ctx, struct hisi_sec_bd3_sqe *sqe)
{
	if (ctx->key_len > SEC_DIGEST_MAX_KEY_SIZE || !ctx->key_len) {
		EMSG("Invalid digest key len(%ld)", ctx->key_len);
		return HISI_QM_DRVCRYPT_IN_EPARA;
	}

	/* If the length of key is not word-aligned, increment by 1 */
	sqe->auth_mac_key |= SHIFT_U64(DIV_ROUND_UP(ctx->key_len,
						    SEC_ENCODE_BYTES),
				       SEC_AKEY_OFFSET_V3);
	sqe->a_key_addr = ctx->key_dma;

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static void sec_digest_fill_long_bd3(struct hashctx *ctx,
				     struct hisi_sec_bd3_sqe *sqe)
{
	uint64_t total_bits = 0;

	if (ctx->has_next && !ctx->iv_len) {
		/* LONG BD FIRST */
		sqe->auth_mac_key |= SHIFT_U32(AI_GEN_INNER,
					       SEC_AI_GEN_OFFSET_V3);
		sqe->stream_scene.auth_pad = AUTHPAD_NOPAD;
		ctx->iv_len = ctx->mac_len;
	} else if (ctx->has_next && ctx->iv_len) {
		/* LONG BD MIDDLE */
		sqe->auth_mac_key |= SHIFT_U32(AI_GEN_IVIN_ADDR,
					       SEC_AI_GEN_OFFSET_V3);
		sqe->stream_scene.auth_pad = AUTHPAD_NOPAD;
		sqe->a_ivin_addr = sqe->mac_addr;
		ctx->iv_len = ctx->mac_len;
	} else if (!ctx->has_next && ctx->iv_len) {
		/* LONG BD END */
		sqe->auth_mac_key |= SHIFT_U32(AI_GEN_IVIN_ADDR,
					       SEC_AI_GEN_OFFSET_V3);
		sqe->stream_scene.auth_pad = AUTHPAD_PAD;
		sqe->a_ivin_addr = sqe->mac_addr;
		total_bits = ctx->long_data_len * BYTE_BITS;
		sqe->stream_scene.long_a_data_len = total_bits;
		ctx->iv_len = 0;
	} else {
		/* SHORT BD */
		ctx->iv_len = 0;
	}
}

static enum hisi_drv_status sec_digest_fill_bd3_sqe(void *bd, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_sec_bd3_sqe *sqe = bd;
	struct hashctx *ctx = msg;
	uint32_t alg_type = 0;

	sqe->bd_param = BD_TYPE3 | SHIFT_U32(ctx->scene, SEC_SCENE_OFFSET_V3);
	sqe->a_len_key = ctx->in_len;
	sqe->auth_mac_key = AUTH_MAC_CALCULATE;
	sqe->data_src_addr = ctx->in_dma;
	sqe->mac_addr = ctx->out_dma;

	if (ctx->mode == WCRYPTO_DIGEST_HMAC) {
		ret = sec_digest_set_hmac_bd3_key(ctx, sqe);
		if (ret)
			return HISI_QM_DRVCRYPT_IN_EPARA;
	}

	sqe->auth_mac_key |= SHIFT_U64(ctx->mac_len / SEC_ENCODE_BYTES,
				       SEC_MAC_OFFSET_V3);
	alg_type = sec_digest_get_alg_type(ctx->algo);
	if (alg_type >= A_ALG_MAX) {
		EMSG("Fail to get digest bd3 alg");
		return HISI_QM_DRVCRYPT_IN_EPARA;
	}
	sqe->auth_mac_key |= SHIFT_U32(alg_type, SEC_AUTH_ALG_OFFSET_V3);
	sec_digest_fill_long_bd3(ctx, sqe);

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static TEE_Result sec_digest_do_task(struct hisi_qp *qp, void *msg)
{
	TEE_Result ret = TEE_SUCCESS;

	ret = hisi_qp_send(qp, msg);
	if (ret) {
		EMSG("Fail to send task, ret=%d", ret);
		return TEE_ERROR_BAD_STATE;
	}

	ret = hisi_qp_recv_sync(qp, msg);
	if (ret) {
		EMSG("Recv task error, ret=%d", ret);
		return TEE_ERROR_BAD_STATE;
	}

	return TEE_SUCCESS;
}
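
/*
 * Completion callbacks: check the hardware "done" flag and the error
 * type of a finished BD2/BD3 descriptor before the result is used.
 */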
static enum hisi_drv_status sec_parse_digest_sqe(void *bd, void *msg __unused)
{
	struct hisi_sec_sqe *sqe = bd;
	uint16_t done = 0;

	done = SEC_GET_FIELD(sqe->type2.done_flag, SEC_DONE_MASK, 0);
	if (done != SEC_HW_TASK_DONE || sqe->type2.error_type) {
		EMSG("SEC BD2 fail! done=%#"PRIx16", etype=%#"PRIx8,
		     done, sqe->type2.error_type);
		return HISI_QM_DRVCRYPT_IN_EPARA;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static enum hisi_drv_status sec_parse_digest_bd3_sqe(void *bd,
						     void *msg __unused)
{
	struct hisi_sec_bd3_sqe *sqe = bd;
	uint16_t done = 0;

	done = SEC_GET_FIELD(sqe->done_flag, SEC_DONE_MASK, 0);
	if (done != SEC_HW_TASK_DONE || sqe->error_type) {
		EMSG("SEC BD3 fail! done=%#"PRIx16", etype=%#"PRIx8,
		     done, sqe->error_type);
		return HISI_QM_DRVCRYPT_IN_EPARA;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}

TEE_Result hisi_sec_digest_ctx_init(struct hashctx *hash_ctx,
				    const uint8_t *key, size_t len)
{
	if (!hash_ctx) {
		EMSG("Input hash_ctx is NULL");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	hash_ctx->in_len = 0;
	hash_ctx->iv_len = 0;
	hash_ctx->has_next = false;
	hash_ctx->long_data_len = 0;
	hash_ctx->scene = SCENE_NOTHING;

	/*
	 * sec_hash_initialize() is called again when a context is reset.
	 * Free any previously allocated input buffer here so that "in"
	 * is guaranteed to be NULL before the next update.
	 */
	free(hash_ctx->in);
	hash_ctx->in = NULL;

	if (len) {
		hash_ctx->key_len = len;
		memcpy(hash_ctx->key, key, len);
	}

	return TEE_SUCCESS;
}

static TEE_Result sec_hash_initialize(struct crypto_hash_ctx *ctx)
{
	struct crypto_hash *hash = NULL;
	struct hashctx *hash_ctx = NULL;

	if (!ctx) {
		EMSG("Input ctx is NULL");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	hash = to_hash_ctx(ctx);
	hash_ctx = hash->ctx;

	return hisi_sec_digest_ctx_init(hash_ctx, NULL, 0);
}
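
/*
 * Update path: buffer the input until the bounce buffer is full, then
 * submit a SCENE_STREAM task and keep the intermediate MAC as IV for
 * the next fragment (long-hash first/middle BDs). The final fragment
 * is flushed by hisi_sec_digest_do_final().
 */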
TEE_Result hisi_sec_digest_do_update(struct hashctx *hash_ctx,
				     const uint8_t *data, size_t len)
{
	TEE_Result ret = TEE_SUCCESS;
	size_t left_size = 0;

	hash_ctx->long_data_len += len;

	if (!hash_ctx->in) {
		if (len <= SMALL_BUF_SIZE)
			hash_ctx->buf_len = SMALL_BUF_SIZE;
		else if (len <= MAX_AUTH_LENGTH)
			hash_ctx->buf_len = ROUNDUP(len, HISI_QM_ALIGN128);
		else
			hash_ctx->buf_len = MAX_AUTH_LENGTH;

		hash_ctx->in_len = 0;
		hash_ctx->in = malloc(hash_ctx->buf_len);
		if (!hash_ctx->in) {
			EMSG("Fail to alloc in data buf");
			return TEE_ERROR_STORAGE_NO_SPACE;
		}
		hash_ctx->in_dma = virt_to_phys(hash_ctx->in);
		if (!hash_ctx->in_dma) {
			free(hash_ctx->in);
			hash_ctx->in = NULL;
			EMSG("Fail to get in_dma");
			return TEE_ERROR_STORAGE_NO_SPACE;
		}
	}

	while (len > 0) {
		if (hash_ctx->in_len + len <= hash_ctx->buf_len) {
			memcpy(hash_ctx->in + hash_ctx->in_len, data, len);
			hash_ctx->in_len += len;
			len = 0;
		} else {
			left_size = hash_ctx->buf_len - hash_ctx->in_len;
			memcpy(hash_ctx->in + hash_ctx->in_len, data,
			       left_size);
			hash_ctx->in_len = hash_ctx->buf_len;
			hash_ctx->scene = SCENE_STREAM;
			hash_ctx->has_next = true;
			data += left_size;
			len -= left_size;
			ret = sec_digest_do_task(hash_ctx->qp, hash_ctx);
			if (ret) {
				EMSG("Fail to do digest task! ret = %#"PRIx32,
				     ret);
				return ret;
			}
			hash_ctx->iv_len = hash_ctx->mac_len;
			hash_ctx->in_len = 0;
		}
	}

	return TEE_SUCCESS;
}

static TEE_Result sec_hash_do_update(struct crypto_hash_ctx *ctx,
				     const uint8_t *data, size_t len)
{
	struct crypto_hash *hash = NULL;
	struct hashctx *hash_ctx = NULL;

	if (!len) {
		IMSG("This is 0 len task, skip");
		return TEE_SUCCESS;
	}

	if (!ctx || (!data && len)) {
		EMSG("Invalid input parameters");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	hash = to_hash_ctx(ctx);
	hash_ctx = hash->ctx;

	return hisi_sec_digest_do_update(hash_ctx, data, len);
}

TEE_Result hisi_sec_digest_do_final(struct hashctx *hash_ctx, uint8_t *digest,
				    size_t len)
{
	TEE_Result ret = TEE_SUCCESS;

	if (!digest || len == 0) {
		EMSG("Invalid input parameters");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	if (hash_ctx->mac_len & WORD_ALIGNMENT_MASK) {
		EMSG("Invalid digest out_bytes");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	hash_ctx->has_next = false;
	ret = sec_digest_do_task(hash_ctx->qp, hash_ctx);
	if (ret) {
		EMSG("Fail to do digest task! ret = %#"PRIx32, ret);
		return ret;
	}

	memcpy(digest, hash_ctx->out, MIN(hash_ctx->mac_len, len));

	return TEE_SUCCESS;
}

static TEE_Result sec_hash_do_final(struct crypto_hash_ctx *ctx,
				    uint8_t *digest, size_t len)
{
	struct crypto_hash *hash = to_hash_ctx(ctx);
	struct hashctx *hash_ctx = hash->ctx;

	return hisi_sec_digest_do_final(hash_ctx, digest, len);
}

void hisi_sec_digest_ctx_free(struct hashctx *hash_ctx)
{
	hisi_qm_release_qp(hash_ctx->qp);

	free(hash_ctx->in);
	hash_ctx->in = NULL;

	memzero_explicit(hash_ctx->key, SEC_DIGEST_MAX_KEY_SIZE);

	free(hash_ctx);
}

static void sec_hash_ctx_free(struct crypto_hash_ctx *ctx)
{
	struct crypto_hash *hash = NULL;
	struct hashctx *hash_ctx = NULL;

	if (!ctx)
		return;

	hash = to_hash_ctx(ctx);
	hash_ctx = hash->ctx;
	if (!hash_ctx)
		return;
	hisi_sec_digest_ctx_free(hash_ctx);

	hash->ctx = NULL;

	free(hash);
}
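
/*
 * copy_state duplicates the streaming state of a hash context (pending
 * input buffer, IV, key and lengths) into another already-initialized
 * context, so the original and the clone can be finalized independently.
 */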
void hisi_sec_digest_copy_state(struct hashctx *out_hash_ctx,
				struct hashctx *in_hash_ctx)
{
	out_hash_ctx->iv_len = in_hash_ctx->iv_len;
	out_hash_ctx->buf_len = in_hash_ctx->buf_len;
	out_hash_ctx->key_len = in_hash_ctx->key_len;
	out_hash_ctx->has_next = in_hash_ctx->has_next;
	out_hash_ctx->long_data_len = in_hash_ctx->long_data_len;

	if (in_hash_ctx->in) {
		out_hash_ctx->in = malloc(out_hash_ctx->buf_len);
		if (!out_hash_ctx->in) {
			EMSG("Fail to alloc in buf");
			return;
		}
		out_hash_ctx->in_dma = virt_to_phys(out_hash_ctx->in);
		if (!out_hash_ctx->in_dma) {
			free(out_hash_ctx->in);
			out_hash_ctx->in = NULL;
			EMSG("Fail to get in_dma");
			return;
		}
		out_hash_ctx->in_len = in_hash_ctx->in_len;
		memcpy(out_hash_ctx->in, in_hash_ctx->in,
		       out_hash_ctx->buf_len);
	}

	memcpy(out_hash_ctx->iv, in_hash_ctx->iv, out_hash_ctx->iv_len);
	memcpy(out_hash_ctx->key, in_hash_ctx->key, out_hash_ctx->key_len);
}

static void sec_hash_copy_state(struct crypto_hash_ctx *out_ctx,
				struct crypto_hash_ctx *in_ctx)
{
	struct crypto_hash *out_hash = NULL;
	struct crypto_hash *in_hash = NULL;
	struct hashctx *out_hash_ctx = NULL;
	struct hashctx *in_hash_ctx = NULL;

	if (!out_ctx || !in_ctx) {
		EMSG("Invalid input parameters");
		return;
	}

	out_hash = to_hash_ctx(out_ctx);
	in_hash = to_hash_ctx(in_ctx);

	out_hash_ctx = out_hash->ctx;
	in_hash_ctx = in_hash->ctx;

	hisi_sec_digest_copy_state(out_hash_ctx, in_hash_ctx);
}

static struct crypto_hash_ops hash_ops = {
	.init = sec_hash_initialize,
	.update = sec_hash_do_update,
	.final = sec_hash_do_final,
	.free_ctx = sec_hash_ctx_free,
	.copy_state = sec_hash_copy_state,
};

static size_t sec_hash_get_mac_len(uint32_t type)
{
	switch (type) {
	case TEE_ALG_MD5:
	case TEE_ALG_HMAC_MD5:
		return HASH_MAC_LEN128;
	case TEE_ALG_SHA1:
	case TEE_ALG_HMAC_SHA1:
		return HASH_MAC_LEN160;
	case TEE_ALG_SHA224:
	case TEE_ALG_HMAC_SHA224:
		return HASH_MAC_LEN224;
	case TEE_ALG_SM3:
	case TEE_ALG_HMAC_SM3:
	case TEE_ALG_SHA256:
	case TEE_ALG_HMAC_SHA256:
		return HASH_MAC_LEN256;
	case TEE_ALG_SHA384:
	case TEE_ALG_HMAC_SHA384:
		return HASH_MAC_LEN384;
	case TEE_ALG_SHA512:
	case TEE_ALG_HMAC_SHA512:
		return HASH_MAC_LEN512;
	default:
		return 0;
	}
}

static TEE_Result sec_hash_get_dma(struct hashctx *hash_ctx)
{
	hash_ctx->key_dma = virt_to_phys(hash_ctx->key);
	if (!hash_ctx->key_dma) {
		EMSG("Fail to get key_dma");
		return TEE_ERROR_STORAGE_NO_SPACE;
	}

	hash_ctx->iv_dma = virt_to_phys(hash_ctx->iv);
	if (!hash_ctx->iv_dma) {
		EMSG("Fail to get iv_dma");
		return TEE_ERROR_STORAGE_NO_SPACE;
	}

	hash_ctx->out_dma = virt_to_phys(hash_ctx->out);
	if (!hash_ctx->out_dma) {
		EMSG("Fail to get out_dma");
		return TEE_ERROR_STORAGE_NO_SPACE;
	}

	return TEE_SUCCESS;
}

TEE_Result hisi_sec_hash_ctx_init(struct hashctx *hash_ctx, uint32_t algo)
{
	TEE_Result ret = TEE_SUCCESS;

	hash_ctx->mac_len = sec_hash_get_mac_len(algo);
	if (!hash_ctx->mac_len) {
		EMSG("Invalid algo type %#"PRIx32, algo);
		return TEE_ERROR_NOT_IMPLEMENTED;
	}

	hash_ctx->algo = algo;
	hash_ctx->mode = algo >> HASH_MODE_OFFSET;

	ret = sec_hash_get_dma(hash_ctx);
	if (ret)
		return ret;

	hash_ctx->qp = sec_create_qp(HISI_QM_CHANNEL_TYPE0);
	if (!hash_ctx->qp) {
		EMSG("Fail to create hash qp");
		return TEE_ERROR_BUSY;
	}

	if (hash_ctx->qp->qm->version == HISI_QM_HW_V2) {
		hash_ctx->qp->fill_sqe = sec_digest_fill_sqe;
		hash_ctx->qp->parse_sqe = sec_parse_digest_sqe;
	} else {
		hash_ctx->qp->fill_sqe = sec_digest_fill_bd3_sqe;
		hash_ctx->qp->parse_sqe = sec_parse_digest_bd3_sqe;
	}

	return TEE_SUCCESS;
}
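
/*
 * drvcrypt glue: allocate a hash context bound to hash_ops and register
 * the allocator with the crypto framework at driver initialization.
 */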
static TEE_Result sec_hash_ctx_allocate(struct crypto_hash_ctx **ctx,
					uint32_t algo)
{
	struct crypto_hash *hash = NULL;
	struct hashctx *hash_ctx = NULL;
	TEE_Result ret = TEE_SUCCESS;

	if (!ctx) {
		EMSG("Ctx is NULL");
		return TEE_ERROR_BAD_PARAMETERS;
	}

	hash = calloc(1, sizeof(*hash));
	if (!hash) {
		EMSG("Fail to alloc hash");
		return TEE_ERROR_STORAGE_NO_SPACE;
	}

	hash_ctx = calloc(1, sizeof(*hash_ctx));
	if (!hash_ctx) {
		EMSG("Fail to alloc hash_ctx");
		ret = TEE_ERROR_STORAGE_NO_SPACE;
		goto free_hash;
	}

	ret = hisi_sec_hash_ctx_init(hash_ctx, algo);
	if (ret)
		goto free_ctx;

	hash->hash_ctx.ops = &hash_ops;
	hash->ctx = hash_ctx;
	*ctx = &hash->hash_ctx;

	return TEE_SUCCESS;

free_ctx:
	free(hash_ctx);
free_hash:
	free(hash);

	return ret;
}

static TEE_Result sec_hash_init(void)
{
	TEE_Result ret = TEE_SUCCESS;

	ret = drvcrypt_register_hash(&sec_hash_ctx_allocate);
	if (ret)
		EMSG("Sec hash register to crypto fail");

	return ret;
}

driver_init(sec_hash_init);