1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd 4 */ 5 6 #include <common.h> 7 #include <clk.h> 8 #include <crypto.h> 9 #include <dm.h> 10 #include <asm/io.h> 11 #include <asm/arch/hardware.h> 12 #include <asm/arch/clock.h> 13 #include <rockchip/crypto_hash_cache.h> 14 #include <rockchip/crypto_v2.h> 15 #include <rockchip/crypto_v2_pka.h> 16 17 #define RK_HASH_CTX_MAGIC 0x1A1A1A1A 18 19 #ifdef DEBUG 20 #define IMSG(format, ...) printf("[%s, %05d]-trace: " format "\n", \ 21 __func__, __LINE__, ##__VA_ARGS__) 22 #else 23 #define IMSG(format, ...) 24 #endif 25 26 struct crypto_lli_desc { 27 u32 src_addr; 28 u32 src_len; 29 u32 dst_addr; 30 u32 dst_len; 31 u32 user_define; 32 u32 reserve; 33 u32 dma_ctrl; 34 u32 next_addr; 35 }; 36 37 struct rk_hash_ctx { 38 struct crypto_lli_desc data_lli; /* lli desc */ 39 struct crypto_hash_cache *hash_cache; 40 u32 magic; /* to check ctx */ 41 u32 algo; /* hash algo */ 42 u8 digest_size; /* hash out length */ 43 u8 reserved[3]; 44 }; 45 46 struct rk_crypto_soc_data { 47 u32 capability; 48 u32 (*dynamic_cap)(void); 49 }; 50 51 struct rockchip_crypto_priv { 52 fdt_addr_t reg; 53 u32 frequency; 54 char *clocks; 55 u32 *frequencies; 56 u32 nclocks; 57 u32 length; 58 struct rk_hash_ctx *hw_ctx; 59 struct rk_crypto_soc_data *soc_data; 60 }; 61 62 #define LLI_ADDR_ALIGN_SIZE 8 63 #define DATA_ADDR_ALIGN_SIZE 8 64 #define DATA_LEN_ALIGN_SIZE 64 65 66 /* crypto timeout 500ms, must support more than 32M data per times*/ 67 #define HASH_UPDATE_LIMIT (32 * 1024 * 1024) 68 #define RK_CRYPTO_TIMEOUT 500000 69 70 #define RK_POLL_TIMEOUT(condition, timeout) \ 71 ({ \ 72 int time_out = timeout; \ 73 while (condition) { \ 74 if (--time_out <= 0) { \ 75 debug("[%s] %d: time out!\n", __func__,\ 76 __LINE__); \ 77 break; \ 78 } \ 79 udelay(1); \ 80 } \ 81 (time_out <= 0) ? -ETIMEDOUT : 0; \ 82 }) 83 84 #define WAIT_TAG_VALID(channel, timeout) ({ \ 85 u32 tag_mask = CRYPTO_CH0_TAG_VALID << (channel);\ 86 int ret;\ 87 ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_TAG_VALID) & tag_mask),\ 88 timeout);\ 89 crypto_write(crypto_read(CRYPTO_TAG_VALID) & tag_mask, CRYPTO_TAG_VALID);\ 90 ret;\ 91 }) 92 93 #define virt_to_phys(addr) (((unsigned long)addr) & 0xffffffff) 94 #define phys_to_virt(addr, area) ((unsigned long)addr) 95 96 #define align_malloc(bytes, alignment) memalign(alignment, bytes) 97 #define align_free(addr) do {if (addr) free(addr);} while (0) 98 99 #define ROUNDUP(size, alignment) round_up(size, alignment) 100 #define cache_op_inner(type, addr, size) \ 101 crypto_flush_cacheline((ulong)addr, size) 102 103 #define IS_NEED_IV(rk_mode) ((rk_mode) != RK_MODE_ECB && \ 104 (rk_mode) != RK_MODE_CMAC && \ 105 (rk_mode) != RK_MODE_CBC_MAC) 106 107 #define IS_NEED_TAG(rk_mode) ((rk_mode) == RK_MODE_CMAC || \ 108 (rk_mode) == RK_MODE_CBC_MAC || \ 109 (rk_mode) == RK_MODE_CCM || \ 110 (rk_mode) == RK_MODE_GCM) 111 112 #define IS_MAC_MODE(rk_mode) ((rk_mode) == RK_MODE_CMAC || \ 113 (rk_mode) == RK_MODE_CBC_MAC) 114 115 #define IS_AE_MODE(rk_mode) ((rk_mode) == RK_MODE_CCM || \ 116 (rk_mode) == RK_MODE_GCM) 117 118 fdt_addr_t crypto_base; 119 120 static inline void word2byte_be(u32 word, u8 *ch) 121 { 122 ch[0] = (word >> 24) & 0xff; 123 ch[1] = (word >> 16) & 0xff; 124 ch[2] = (word >> 8) & 0xff; 125 ch[3] = (word >> 0) & 0xff; 126 } 127 128 static inline u32 byte2word_be(const u8 *ch) 129 { 130 return (*ch << 24) + (*(ch + 1) << 16) + (*(ch + 2) << 8) + *(ch + 3); 131 } 132 133 static inline void clear_regs(u32 base, u32 words) 134 { 135 int i; 136 137 /*clear out register*/ 138 for (i = 0; i < words; i++) 139 crypto_write(0, base + 4 * i); 140 } 141 142 static inline void clear_hash_out_reg(void) 143 { 144 clear_regs(CRYPTO_HASH_DOUT_0, 16); 145 } 146 147 static inline void clear_key_regs(void) 148 { 149 clear_regs(CRYPTO_CH0_KEY_0, CRYPTO_KEY_CHANNEL_NUM * 4); 150 } 151 152 static inline void read_regs(u32 base, u8 *data, u32 data_len) 153 { 154 u8 tmp_buf[4]; 155 u32 i; 156 157 for (i = 0; i < data_len / 4; i++) 158 word2byte_be(crypto_read(base + i * 4), 159 data + i * 4); 160 161 if (data_len % 4) { 162 word2byte_be(crypto_read(base + i * 4), tmp_buf); 163 memcpy(data + i * 4, tmp_buf, data_len % 4); 164 } 165 } 166 167 static inline void write_regs(u32 base, const u8 *data, u32 data_len) 168 { 169 u8 tmp_buf[4]; 170 u32 i; 171 172 for (i = 0; i < data_len / 4; i++, base += 4) 173 crypto_write(byte2word_be(data + i * 4), base); 174 175 if (data_len % 4) { 176 memset(tmp_buf, 0x00, sizeof(tmp_buf)); 177 memcpy((u8 *)tmp_buf, data + i * 4, data_len % 4); 178 crypto_write(byte2word_be(tmp_buf), base); 179 } 180 } 181 182 static inline void write_key_reg(u32 chn, const u8 *key, u32 key_len) 183 { 184 write_regs(CRYPTO_CH0_KEY_0 + chn * 0x10, key, key_len); 185 } 186 187 static inline void set_iv_reg(u32 chn, const u8 *iv, u32 iv_len) 188 { 189 u32 base_iv; 190 191 base_iv = CRYPTO_CH0_IV_0 + chn * 0x10; 192 193 /* clear iv */ 194 clear_regs(base_iv, 4); 195 196 if (!iv || iv_len == 0) 197 return; 198 199 write_regs(base_iv, iv, iv_len); 200 201 crypto_write(iv_len, CRYPTO_CH0_IV_LEN_0 + 4 * chn); 202 } 203 204 static inline void get_iv_reg(u32 chn, u8 *iv, u32 iv_len) 205 { 206 u32 base_iv; 207 208 base_iv = CRYPTO_CH0_IV_0 + chn * 0x10; 209 210 read_regs(base_iv, iv, iv_len); 211 } 212 213 static inline void get_tag_from_reg(u32 chn, u8 *tag, u32 tag_len) 214 { 215 u32 i; 216 u32 chn_base = CRYPTO_CH0_TAG_0 + 0x10 * chn; 217 218 for (i = 0; i < tag_len / 4; i++, chn_base += 4) 219 word2byte_be(crypto_read(chn_base), tag + 4 * i); 220 } 221 222 static u32 crypto_v3_dynamic_cap(void) 223 { 224 u32 capability = 0; 225 u32 ver_reg, i; 226 struct cap_map { 227 u32 ver_offset; 228 u32 mask; 229 u32 cap_bit; 230 }; 231 const struct cap_map cap_tbl[] = { 232 {CRYPTO_HASH_VERSION, CRYPTO_HASH_MD5_FLAG, CRYPTO_MD5}, 233 {CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA1_FLAG, CRYPTO_SHA1}, 234 {CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA256_FLAG, CRYPTO_SHA256}, 235 {CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA512_FLAG, CRYPTO_SHA512}, 236 {CRYPTO_HASH_VERSION, CRYPTO_HASH_SM3_FLAG, CRYPTO_SM3}, 237 238 {CRYPTO_HMAC_VERSION, CRYPTO_HMAC_MD5_FLAG, CRYPTO_HMAC_MD5}, 239 {CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA1_FLAG, CRYPTO_HMAC_SHA1}, 240 {CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA256_FLAG, CRYPTO_HMAC_SHA256}, 241 {CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA512_FLAG, CRYPTO_HMAC_SHA512}, 242 {CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SM3_FLAG, CRYPTO_HMAC_SM3}, 243 244 {CRYPTO_AES_VERSION, CRYPTO_AES256_FLAG, CRYPTO_AES}, 245 {CRYPTO_DES_VERSION, CRYPTO_TDES_FLAG, CRYPTO_DES}, 246 {CRYPTO_SM4_VERSION, CRYPTO_ECB_FLAG, CRYPTO_SM4}, 247 }; 248 249 /* rsa */ 250 capability = CRYPTO_RSA512 | 251 CRYPTO_RSA1024 | 252 CRYPTO_RSA2048 | 253 CRYPTO_RSA3072 | 254 CRYPTO_RSA4096; 255 256 for (i = 0; i < ARRAY_SIZE(cap_tbl); i++) { 257 ver_reg = crypto_read(cap_tbl[i].ver_offset); 258 259 if ((ver_reg & cap_tbl[i].mask) == cap_tbl[i].mask) 260 capability |= cap_tbl[i].cap_bit; 261 } 262 263 return capability; 264 } 265 266 static int hw_crypto_reset(void) 267 { 268 u32 val = 0, mask = 0; 269 int ret; 270 271 val = CRYPTO_SW_PKA_RESET | CRYPTO_SW_CC_RESET; 272 mask = val << CRYPTO_WRITE_MASK_SHIFT; 273 274 /* reset pka and crypto modules*/ 275 crypto_write(val | mask, CRYPTO_RST_CTL); 276 277 /* wait reset compelete */ 278 ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL), RK_CRYPTO_TIMEOUT); 279 280 return ret; 281 } 282 283 static void hw_hash_clean_ctx(struct rk_hash_ctx *ctx) 284 { 285 /* clear hash status */ 286 crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL); 287 288 assert(ctx); 289 assert(ctx->magic == RK_HASH_CTX_MAGIC); 290 291 crypto_hash_cache_free(ctx->hash_cache); 292 293 memset(ctx, 0x00, sizeof(*ctx)); 294 } 295 296 static int rk_hash_init(void *hw_ctx, u32 algo) 297 { 298 struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)hw_ctx; 299 u32 reg_ctrl = 0; 300 int ret; 301 302 if (!tmp_ctx) 303 return -EINVAL; 304 305 reg_ctrl = CRYPTO_SW_CC_RESET; 306 crypto_write(reg_ctrl | (reg_ctrl << CRYPTO_WRITE_MASK_SHIFT), 307 CRYPTO_RST_CTL); 308 309 /* wait reset compelete */ 310 ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL), 311 RK_CRYPTO_TIMEOUT); 312 313 reg_ctrl = 0; 314 tmp_ctx->algo = algo; 315 switch (algo) { 316 case CRYPTO_MD5: 317 case CRYPTO_HMAC_MD5: 318 reg_ctrl |= CRYPTO_MODE_MD5; 319 tmp_ctx->digest_size = 16; 320 break; 321 case CRYPTO_SHA1: 322 case CRYPTO_HMAC_SHA1: 323 reg_ctrl |= CRYPTO_MODE_SHA1; 324 tmp_ctx->digest_size = 20; 325 break; 326 case CRYPTO_SHA256: 327 case CRYPTO_HMAC_SHA256: 328 reg_ctrl |= CRYPTO_MODE_SHA256; 329 tmp_ctx->digest_size = 32; 330 break; 331 case CRYPTO_SHA512: 332 case CRYPTO_HMAC_SHA512: 333 reg_ctrl |= CRYPTO_MODE_SHA512; 334 tmp_ctx->digest_size = 64; 335 break; 336 case CRYPTO_SM3: 337 case CRYPTO_HMAC_SM3: 338 reg_ctrl |= CRYPTO_MODE_SM3; 339 tmp_ctx->digest_size = 32; 340 break; 341 default: 342 ret = -EINVAL; 343 goto exit; 344 } 345 346 clear_hash_out_reg(); 347 348 /* enable hardware padding */ 349 reg_ctrl |= CRYPTO_HW_PAD_ENABLE; 350 crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL); 351 352 /* FIFO input and output data byte swap */ 353 /* such as B0, B1, B2, B3 -> B3, B2, B1, B0 */ 354 reg_ctrl = CRYPTO_DOUT_BYTESWAP | CRYPTO_DOIN_BYTESWAP; 355 crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_FIFO_CTL); 356 357 /* enable src_item_done interrupt */ 358 crypto_write(0, CRYPTO_DMA_INT_EN); 359 360 tmp_ctx->magic = RK_HASH_CTX_MAGIC; 361 362 return 0; 363 exit: 364 /* clear hash setting if init failed */ 365 crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL); 366 367 return ret; 368 } 369 370 static int rk_hash_direct_calc(void *hw_data, const u8 *data, 371 u32 data_len, u8 *started_flag, u8 is_last) 372 { 373 struct rockchip_crypto_priv *priv = hw_data; 374 struct rk_hash_ctx *hash_ctx = priv->hw_ctx; 375 struct crypto_lli_desc *lli = &hash_ctx->data_lli; 376 int ret = -EINVAL; 377 u32 tmp = 0, mask = 0; 378 379 assert(IS_ALIGNED((ulong)data, DATA_ADDR_ALIGN_SIZE)); 380 assert(is_last || IS_ALIGNED(data_len, DATA_LEN_ALIGN_SIZE)); 381 382 debug("%s: data = %p, len = %u, s = %x, l = %x\n", 383 __func__, data, data_len, *started_flag, is_last); 384 385 memset(lli, 0x00, sizeof(*lli)); 386 lli->src_addr = (u32)virt_to_phys(data); 387 lli->src_len = data_len; 388 lli->dma_ctrl = LLI_DMA_CTRL_SRC_DONE; 389 390 if (is_last) { 391 lli->user_define |= LLI_USER_STRING_LAST; 392 lli->dma_ctrl |= LLI_DMA_CTRL_LAST; 393 } else { 394 lli->next_addr = (u32)virt_to_phys(lli); 395 lli->dma_ctrl |= LLI_DMA_CTRL_PAUSE; 396 } 397 398 if (!(*started_flag)) { 399 lli->user_define |= 400 (LLI_USER_STRING_START | LLI_USER_CPIHER_START); 401 crypto_write((u32)virt_to_phys(lli), CRYPTO_DMA_LLI_ADDR); 402 crypto_write((CRYPTO_HASH_ENABLE << CRYPTO_WRITE_MASK_SHIFT) | 403 CRYPTO_HASH_ENABLE, CRYPTO_HASH_CTL); 404 tmp = CRYPTO_DMA_START; 405 *started_flag = 1; 406 } else { 407 tmp = CRYPTO_DMA_RESTART; 408 } 409 410 /* flush cache */ 411 crypto_flush_cacheline((ulong)lli, sizeof(*lli)); 412 crypto_flush_cacheline((ulong)data, data_len); 413 414 /* start calculate */ 415 crypto_write(tmp << CRYPTO_WRITE_MASK_SHIFT | tmp, 416 CRYPTO_DMA_CTL); 417 418 /* mask CRYPTO_SYNC_LOCKSTEP_INT_ST flag */ 419 mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST); 420 421 /* wait calc ok */ 422 ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask), 423 RK_CRYPTO_TIMEOUT); 424 425 /* clear interrupt status */ 426 tmp = crypto_read(CRYPTO_DMA_INT_ST); 427 crypto_write(tmp, CRYPTO_DMA_INT_ST); 428 429 if ((tmp & mask) != CRYPTO_SRC_ITEM_DONE_INT_ST && 430 (tmp & mask) != CRYPTO_ZERO_LEN_INT_ST) { 431 ret = -EFAULT; 432 debug("[%s] %d: CRYPTO_DMA_INT_ST = 0x%x\n", 433 __func__, __LINE__, tmp); 434 goto exit; 435 } 436 437 priv->length += data_len; 438 exit: 439 return ret; 440 } 441 442 int rk_hash_update(void *ctx, const u8 *data, u32 data_len) 443 { 444 struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx; 445 int ret = -EINVAL; 446 447 debug("\n"); 448 if (!tmp_ctx || !data) 449 goto exit; 450 451 if (tmp_ctx->digest_size == 0 || tmp_ctx->magic != RK_HASH_CTX_MAGIC) 452 goto exit; 453 454 ret = crypto_hash_update_with_cache(tmp_ctx->hash_cache, 455 data, data_len); 456 457 exit: 458 /* free lli list */ 459 if (ret) 460 hw_hash_clean_ctx(tmp_ctx); 461 462 return ret; 463 } 464 465 int rk_hash_final(void *ctx, u8 *digest, size_t len) 466 { 467 struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx; 468 int ret = -EINVAL; 469 470 if (!digest) 471 goto exit; 472 473 if (!tmp_ctx || 474 tmp_ctx->digest_size == 0 || 475 len > tmp_ctx->digest_size || 476 tmp_ctx->magic != RK_HASH_CTX_MAGIC) { 477 goto exit; 478 } 479 480 /* wait hash value ok */ 481 ret = RK_POLL_TIMEOUT(!crypto_read(CRYPTO_HASH_VALID), 482 RK_CRYPTO_TIMEOUT); 483 484 read_regs(CRYPTO_HASH_DOUT_0, digest, len); 485 486 /* clear hash status */ 487 crypto_write(CRYPTO_HASH_IS_VALID, CRYPTO_HASH_VALID); 488 crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL); 489 490 exit: 491 492 return ret; 493 } 494 495 static u32 rockchip_crypto_capability(struct udevice *dev) 496 { 497 struct rockchip_crypto_priv *priv = dev_get_priv(dev); 498 u32 capability, mask = 0; 499 500 capability = priv->soc_data->capability; 501 502 #if !(CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)) 503 mask |= (CRYPTO_DES | CRYPTO_AES | CRYPTO_SM4); 504 #endif 505 506 #if !(CONFIG_IS_ENABLED(ROCKCHIP_HMAC)) 507 mask |= (CRYPTO_HMAC_MD5 | CRYPTO_HMAC_SHA1 | CRYPTO_HMAC_SHA256 | 508 CRYPTO_HMAC_SHA512 | CRYPTO_HMAC_SM3); 509 #endif 510 511 #if !(CONFIG_IS_ENABLED(ROCKCHIP_RSA)) 512 mask |= (CRYPTO_RSA512 | CRYPTO_RSA1024 | CRYPTO_RSA2048 | 513 CRYPTO_RSA3072 | CRYPTO_RSA4096); 514 #endif 515 516 return capability & (~mask); 517 } 518 519 static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx) 520 { 521 struct rockchip_crypto_priv *priv = dev_get_priv(dev); 522 struct rk_hash_ctx *hash_ctx = priv->hw_ctx; 523 524 if (!ctx) 525 return -EINVAL; 526 527 memset(hash_ctx, 0x00, sizeof(*hash_ctx)); 528 529 priv->length = 0; 530 531 hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc, 532 priv, ctx->length, 533 DATA_ADDR_ALIGN_SIZE, 534 DATA_LEN_ALIGN_SIZE); 535 if (!hash_ctx->hash_cache) 536 return -EFAULT; 537 538 return rk_hash_init(hash_ctx, ctx->algo); 539 } 540 541 static int rockchip_crypto_sha_update(struct udevice *dev, 542 u32 *input, u32 len) 543 { 544 struct rockchip_crypto_priv *priv = dev_get_priv(dev); 545 int ret, i; 546 u8 *p; 547 548 if (!len) 549 return -EINVAL; 550 551 p = (u8 *)input; 552 553 for (i = 0; i < len / HASH_UPDATE_LIMIT; i++, p += HASH_UPDATE_LIMIT) { 554 ret = rk_hash_update(priv->hw_ctx, p, HASH_UPDATE_LIMIT); 555 if (ret) 556 goto exit; 557 } 558 559 if (len % HASH_UPDATE_LIMIT) 560 ret = rk_hash_update(priv->hw_ctx, p, len % HASH_UPDATE_LIMIT); 561 562 exit: 563 return ret; 564 } 565 566 static int rockchip_crypto_sha_final(struct udevice *dev, 567 sha_context *ctx, u8 *output) 568 { 569 struct rockchip_crypto_priv *priv = dev_get_priv(dev); 570 u32 nbits; 571 int ret; 572 573 nbits = crypto_algo_nbits(ctx->algo); 574 575 if (priv->length != ctx->length) { 576 printf("total length(0x%08x) != init length(0x%08x)!\n", 577 priv->length, ctx->length); 578 ret = -EIO; 579 goto exit; 580 } 581 582 ret = rk_hash_final(priv->hw_ctx, (u8 *)output, BITS2BYTE(nbits)); 583 584 exit: 585 hw_hash_clean_ctx(priv->hw_ctx); 586 return ret; 587 } 588 589 #if CONFIG_IS_ENABLED(ROCKCHIP_HMAC) 590 int rk_hmac_init(void *hw_ctx, u32 algo, u8 *key, u32 key_len) 591 { 592 u32 reg_ctrl = 0; 593 int ret; 594 595 if (!key || !key_len || key_len > 64) 596 return -EINVAL; 597 598 clear_key_regs(); 599 600 write_key_reg(0, key, key_len); 601 602 ret = rk_hash_init(hw_ctx, algo); 603 if (ret) 604 return ret; 605 606 reg_ctrl = crypto_read(CRYPTO_HASH_CTL) | CRYPTO_HMAC_ENABLE; 607 crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL); 608 609 return ret; 610 } 611 612 static int rockchip_crypto_hmac_init(struct udevice *dev, 613 sha_context *ctx, u8 *key, u32 key_len) 614 { 615 struct rockchip_crypto_priv *priv = dev_get_priv(dev); 616 struct rk_hash_ctx *hash_ctx = priv->hw_ctx; 617 618 if (!ctx) 619 return -EINVAL; 620 621 memset(hash_ctx, 0x00, sizeof(*hash_ctx)); 622 623 priv->length = 0; 624 625 hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc, 626 priv, ctx->length, 627 DATA_ADDR_ALIGN_SIZE, 628 DATA_LEN_ALIGN_SIZE); 629 if (!hash_ctx->hash_cache) 630 return -EFAULT; 631 632 return rk_hmac_init(priv->hw_ctx, ctx->algo, key, key_len); 633 } 634 635 static int rockchip_crypto_hmac_update(struct udevice *dev, 636 u32 *input, u32 len) 637 { 638 return rockchip_crypto_sha_update(dev, input, len); 639 } 640 641 static int rockchip_crypto_hmac_final(struct udevice *dev, 642 sha_context *ctx, u8 *output) 643 { 644 return rockchip_crypto_sha_final(dev, ctx, output); 645 } 646 647 #endif 648 649 #if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER) 650 static u8 g_key_chn; 651 652 static const u32 rk_mode2bc_mode[RK_MODE_MAX] = { 653 [RK_MODE_ECB] = CRYPTO_BC_ECB, 654 [RK_MODE_CBC] = CRYPTO_BC_CBC, 655 [RK_MODE_CTS] = CRYPTO_BC_CTS, 656 [RK_MODE_CTR] = CRYPTO_BC_CTR, 657 [RK_MODE_CFB] = CRYPTO_BC_CFB, 658 [RK_MODE_OFB] = CRYPTO_BC_OFB, 659 [RK_MODE_XTS] = CRYPTO_BC_XTS, 660 [RK_MODE_CCM] = CRYPTO_BC_CCM, 661 [RK_MODE_GCM] = CRYPTO_BC_GCM, 662 [RK_MODE_CMAC] = CRYPTO_BC_CMAC, 663 [RK_MODE_CBC_MAC] = CRYPTO_BC_CBC_MAC, 664 }; 665 666 static inline void set_pc_len_reg(u32 chn, u64 pc_len) 667 { 668 u32 chn_base = CRYPTO_CH0_PC_LEN_0 + chn * 0x08; 669 670 crypto_write(pc_len & 0xffffffff, chn_base); 671 crypto_write(pc_len >> 32, chn_base + 4); 672 } 673 674 static inline void set_aad_len_reg(u32 chn, u64 pc_len) 675 { 676 u32 chn_base = CRYPTO_CH0_AAD_LEN_0 + chn * 0x08; 677 678 crypto_write(pc_len & 0xffffffff, chn_base); 679 crypto_write(pc_len >> 32, chn_base + 4); 680 } 681 682 static inline bool is_des_mode(u32 rk_mode) 683 { 684 return (rk_mode == RK_MODE_ECB || 685 rk_mode == RK_MODE_CBC || 686 rk_mode == RK_MODE_CFB || 687 rk_mode == RK_MODE_OFB); 688 } 689 690 static void dump_crypto_state(struct crypto_lli_desc *desc, 691 u32 tmp, u32 expt_int, 692 const u8 *in, const u8 *out, 693 u32 len, int ret) 694 { 695 IMSG("%s\n", ret == -ETIME ? "timeout" : "dismatch"); 696 697 IMSG("CRYPTO_DMA_INT_ST = %08x, expect_int = %08x\n", 698 tmp, expt_int); 699 IMSG("data desc = %p\n", desc); 700 IMSG("\taddr_in = [%08x <=> %08x]\n", 701 desc->src_addr, (u32)virt_to_phys(in)); 702 IMSG("\taddr_out = [%08x <=> %08x]\n", 703 desc->dst_addr, (u32)virt_to_phys(out)); 704 IMSG("\tsrc_len = [%08x <=> %08x]\n", 705 desc->src_len, (u32)len); 706 IMSG("\tdst_len = %08x\n", desc->dst_len); 707 IMSG("\tdma_ctl = %08x\n", desc->dma_ctrl); 708 IMSG("\tuser_define = %08x\n", desc->user_define); 709 710 IMSG("\n\nDMA CRYPTO_DMA_LLI_ADDR status = %08x\n", 711 crypto_read(CRYPTO_DMA_LLI_ADDR)); 712 IMSG("DMA CRYPTO_DMA_ST status = %08x\n", 713 crypto_read(CRYPTO_DMA_ST)); 714 IMSG("DMA CRYPTO_DMA_STATE status = %08x\n", 715 crypto_read(CRYPTO_DMA_STATE)); 716 IMSG("DMA CRYPTO_DMA_LLI_RADDR status = %08x\n", 717 crypto_read(CRYPTO_DMA_LLI_RADDR)); 718 IMSG("DMA CRYPTO_DMA_SRC_RADDR status = %08x\n", 719 crypto_read(CRYPTO_DMA_SRC_RADDR)); 720 IMSG("DMA CRYPTO_DMA_DST_RADDR status = %08x\n", 721 crypto_read(CRYPTO_DMA_DST_RADDR)); 722 IMSG("DMA CRYPTO_CIPHER_ST status = %08x\n", 723 crypto_read(CRYPTO_CIPHER_ST)); 724 IMSG("DMA CRYPTO_CIPHER_STATE status = %08x\n", 725 crypto_read(CRYPTO_CIPHER_STATE)); 726 IMSG("DMA CRYPTO_TAG_VALID status = %08x\n", 727 crypto_read(CRYPTO_TAG_VALID)); 728 IMSG("LOCKSTEP status = %08x\n\n", 729 crypto_read(0x618)); 730 731 IMSG("dst %dbyte not transferred\n", 732 desc->dst_addr + desc->dst_len - 733 crypto_read(CRYPTO_DMA_DST_RADDR)); 734 } 735 736 static int ccm128_set_iv_reg(u32 chn, const u8 *nonce, u32 nlen) 737 { 738 u8 iv_buf[AES_BLOCK_SIZE]; 739 u32 L; 740 741 memset(iv_buf, 0x00, sizeof(iv_buf)); 742 743 L = 15 - nlen; 744 iv_buf[0] = ((u8)(L - 1) & 7); 745 746 /* the L parameter */ 747 L = iv_buf[0] & 7; 748 749 /* nonce is too short */ 750 if (nlen < (14 - L)) 751 return -EINVAL; 752 753 /* clear aad flag */ 754 iv_buf[0] &= ~0x40; 755 memcpy(&iv_buf[1], nonce, 14 - L); 756 757 set_iv_reg(chn, iv_buf, AES_BLOCK_SIZE); 758 759 return 0; 760 } 761 762 static void ccm_aad_padding(u32 aad_len, u8 *padding, u32 *padding_size) 763 { 764 u32 i; 765 766 i = aad_len < (0x10000 - 0x100) ? 2 : 6; 767 768 if (i == 2) { 769 padding[0] = (u8)(aad_len >> 8); 770 padding[1] = (u8)aad_len; 771 } else { 772 padding[0] = 0xFF; 773 padding[1] = 0xFE; 774 padding[2] = (u8)(aad_len >> 24); 775 padding[3] = (u8)(aad_len >> 16); 776 padding[4] = (u8)(aad_len >> 8); 777 } 778 779 *padding_size = i; 780 } 781 782 static int ccm_compose_aad_iv(u8 *aad_iv, u32 data_len, u32 tag_size) 783 { 784 aad_iv[0] |= ((u8)(((tag_size - 2) / 2) & 7) << 3); 785 786 aad_iv[12] = (u8)(data_len >> 24); 787 aad_iv[13] = (u8)(data_len >> 16); 788 aad_iv[14] = (u8)(data_len >> 8); 789 aad_iv[15] = (u8)data_len; 790 791 aad_iv[0] |= 0x40; //set aad flag 792 793 return 0; 794 } 795 796 static int hw_cipher_init(u32 chn, const u8 *key, const u8 *twk_key, 797 u32 key_len, const u8 *iv, u32 iv_len, 798 u32 algo, u32 mode, bool enc) 799 { 800 u32 rk_mode = RK_GET_RK_MODE(mode); 801 u32 key_chn_sel = chn; 802 u32 reg_ctrl = 0; 803 804 IMSG("%s: key addr is %p, key_len is %d, iv addr is %p", 805 __func__, key, key_len, iv); 806 if (rk_mode >= RK_MODE_MAX) 807 return -EINVAL; 808 809 switch (algo) { 810 case CRYPTO_DES: 811 if (key_len > DES_BLOCK_SIZE) 812 reg_ctrl |= CRYPTO_BC_TDES; 813 else 814 reg_ctrl |= CRYPTO_BC_DES; 815 break; 816 case CRYPTO_AES: 817 reg_ctrl |= CRYPTO_BC_AES; 818 break; 819 case CRYPTO_SM4: 820 reg_ctrl |= CRYPTO_BC_SM4; 821 break; 822 default: 823 return -EINVAL; 824 } 825 826 if (algo == CRYPTO_AES || algo == CRYPTO_SM4) { 827 switch (key_len) { 828 case AES_KEYSIZE_128: 829 reg_ctrl |= CRYPTO_BC_128_bit_key; 830 break; 831 case AES_KEYSIZE_192: 832 reg_ctrl |= CRYPTO_BC_192_bit_key; 833 break; 834 case AES_KEYSIZE_256: 835 reg_ctrl |= CRYPTO_BC_256_bit_key; 836 break; 837 default: 838 return -EINVAL; 839 } 840 } 841 842 reg_ctrl |= rk_mode2bc_mode[rk_mode]; 843 if (!enc) 844 reg_ctrl |= CRYPTO_BC_DECRYPT; 845 846 /* write key data to reg */ 847 write_key_reg(key_chn_sel, key, key_len); 848 849 /* write twk key for xts mode */ 850 if (rk_mode == RK_MODE_XTS) 851 write_key_reg(key_chn_sel + 4, twk_key, key_len); 852 853 /* set iv reg */ 854 if (rk_mode == RK_MODE_CCM) 855 ccm128_set_iv_reg(chn, iv, iv_len); 856 else 857 set_iv_reg(chn, iv, iv_len); 858 859 /* din_swap set 1, dout_swap set 1, default 1. */ 860 crypto_write(0x00030003, CRYPTO_FIFO_CTL); 861 crypto_write(0, CRYPTO_DMA_INT_EN); 862 863 crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL); 864 865 return 0; 866 } 867 868 static int hw_cipher_crypt(const u8 *in, u8 *out, u64 len, 869 const u8 *aad, u32 aad_len, 870 u8 *tag, u32 tag_len, u32 mode) 871 { 872 struct crypto_lli_desc *data_desc = NULL, *aad_desc = NULL; 873 u8 *dma_in = NULL, *dma_out = NULL, *aad_tmp = NULL; 874 u32 rk_mode = RK_GET_RK_MODE(mode); 875 u32 reg_ctrl = 0, tmp_len = 0; 876 u32 expt_int = 0, mask = 0; 877 u32 key_chn = g_key_chn; 878 u32 tmp, dst_len = 0; 879 int ret = -1; 880 881 if (rk_mode == RK_MODE_CTS && len <= AES_BLOCK_SIZE) { 882 printf("CTS mode length %u < 16Byte\n", (u32)len); 883 return -EINVAL; 884 } 885 886 tmp_len = (rk_mode == RK_MODE_CTR) ? ROUNDUP(len, AES_BLOCK_SIZE) : len; 887 888 data_desc = align_malloc(sizeof(*data_desc), LLI_ADDR_ALIGN_SIZE); 889 if (!data_desc) 890 goto exit; 891 892 if (IS_ALIGNED((ulong)in, DATA_ADDR_ALIGN_SIZE) && tmp_len == len) 893 dma_in = (void *)in; 894 else 895 dma_in = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE); 896 if (!dma_in) 897 goto exit; 898 899 if (out) { 900 if (IS_ALIGNED((ulong)out, DATA_ADDR_ALIGN_SIZE) && 901 tmp_len == len) 902 dma_out = out; 903 else 904 dma_out = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE); 905 if (!dma_out) 906 goto exit; 907 dst_len = tmp_len; 908 } 909 910 memset(data_desc, 0x00, sizeof(*data_desc)); 911 if (dma_in != in) 912 memcpy(dma_in, in, len); 913 914 data_desc->src_addr = (u32)virt_to_phys(dma_in); 915 data_desc->src_len = tmp_len; 916 data_desc->dst_addr = (u32)virt_to_phys(dma_out); 917 data_desc->dst_len = dst_len; 918 data_desc->dma_ctrl = LLI_DMA_CTRL_LAST; 919 920 if (IS_MAC_MODE(rk_mode)) { 921 expt_int = CRYPTO_LIST_DONE_INT_ST; 922 data_desc->dma_ctrl |= LLI_DMA_CTRL_LIST_DONE; 923 } else { 924 expt_int = CRYPTO_DST_ITEM_DONE_INT_ST; 925 data_desc->dma_ctrl |= LLI_DMA_CTRL_DST_DONE; 926 } 927 928 if (rk_mode == RK_MODE_CCM || rk_mode == RK_MODE_GCM) { 929 u32 aad_tmp_len = 0; 930 931 data_desc->user_define = LLI_USER_STRING_START | 932 LLI_USER_STRING_LAST | 933 (key_chn << 4); 934 935 aad_desc = align_malloc(sizeof(*aad_desc), LLI_ADDR_ALIGN_SIZE); 936 if (!aad_desc) 937 goto exit; 938 939 memset(aad_desc, 0x00, sizeof(*aad_desc)); 940 aad_desc->next_addr = (u32)virt_to_phys(data_desc); 941 aad_desc->user_define = LLI_USER_CPIHER_START | 942 LLI_USER_STRING_START | 943 LLI_USER_STRING_LAST | 944 LLI_USER_STRING_AAD | 945 (key_chn << 4); 946 947 if (rk_mode == RK_MODE_CCM) { 948 u8 padding[AES_BLOCK_SIZE]; 949 u32 padding_size = 0; 950 951 memset(padding, 0x00, sizeof(padding)); 952 ccm_aad_padding(aad_len, padding, &padding_size); 953 954 aad_tmp_len = aad_len + AES_BLOCK_SIZE + padding_size; 955 aad_tmp_len = ROUNDUP(aad_tmp_len, AES_BLOCK_SIZE); 956 aad_tmp = align_malloc(aad_tmp_len, 957 DATA_ADDR_ALIGN_SIZE); 958 if (!aad_tmp) 959 goto exit; 960 961 /* read iv data from reg */ 962 get_iv_reg(key_chn, aad_tmp, AES_BLOCK_SIZE); 963 ccm_compose_aad_iv(aad_tmp, tmp_len, tag_len); 964 memcpy(aad_tmp + AES_BLOCK_SIZE, padding, padding_size); 965 memset(aad_tmp + aad_tmp_len - AES_BLOCK_SIZE, 966 0x00, AES_BLOCK_SIZE); 967 memcpy(aad_tmp + AES_BLOCK_SIZE + padding_size, 968 aad, aad_len); 969 } else { 970 aad_tmp_len = aad_len; 971 if (IS_ALIGNED((ulong)aad, DATA_ADDR_ALIGN_SIZE)) { 972 aad_tmp = (void *)aad; 973 } else { 974 aad_tmp = align_malloc(aad_tmp_len, 975 DATA_ADDR_ALIGN_SIZE); 976 if (!aad_tmp) 977 goto exit; 978 979 memcpy(aad_tmp, aad, aad_tmp_len); 980 } 981 982 set_aad_len_reg(key_chn, aad_tmp_len); 983 set_pc_len_reg(key_chn, tmp_len); 984 } 985 986 aad_desc->src_addr = (u32)virt_to_phys(aad_tmp); 987 aad_desc->src_len = aad_tmp_len; 988 crypto_write((u32)virt_to_phys(aad_desc), CRYPTO_DMA_LLI_ADDR); 989 cache_op_inner(DCACHE_AREA_CLEAN, aad_tmp, aad_tmp_len); 990 cache_op_inner(DCACHE_AREA_CLEAN, aad_desc, sizeof(*aad_desc)); 991 } else { 992 data_desc->user_define = LLI_USER_CPIHER_START | 993 LLI_USER_STRING_START | 994 LLI_USER_STRING_LAST | 995 (key_chn << 4); 996 crypto_write((u32)virt_to_phys(data_desc), CRYPTO_DMA_LLI_ADDR); 997 } 998 999 cache_op_inner(DCACHE_AREA_CLEAN, data_desc, sizeof(*data_desc)); 1000 cache_op_inner(DCACHE_AREA_CLEAN, dma_in, tmp_len); 1001 cache_op_inner(DCACHE_AREA_INVALIDATE, dma_out, tmp_len); 1002 1003 /* din_swap set 1, dout_swap set 1, default 1. */ 1004 crypto_write(0x00030003, CRYPTO_FIFO_CTL); 1005 crypto_write(0, CRYPTO_DMA_INT_EN); 1006 1007 reg_ctrl = crypto_read(CRYPTO_BC_CTL) | CRYPTO_BC_ENABLE; 1008 crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL); 1009 crypto_write(0x00010001, CRYPTO_DMA_CTL);//start 1010 1011 mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST); 1012 1013 /* wait calc ok */ 1014 ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask), 1015 RK_CRYPTO_TIMEOUT); 1016 tmp = crypto_read(CRYPTO_DMA_INT_ST); 1017 crypto_write(tmp, CRYPTO_DMA_INT_ST); 1018 1019 if ((tmp & mask) == expt_int) { 1020 if (out && out != dma_out) 1021 memcpy(out, dma_out, len); 1022 1023 if (IS_NEED_TAG(rk_mode)) { 1024 ret = WAIT_TAG_VALID(key_chn, RK_CRYPTO_TIMEOUT); 1025 get_tag_from_reg(key_chn, tag, AES_BLOCK_SIZE); 1026 } 1027 } else { 1028 dump_crypto_state(data_desc, tmp, expt_int, in, out, len, ret); 1029 ret = -1; 1030 } 1031 1032 exit: 1033 crypto_write(0xffff0000, CRYPTO_BC_CTL);//bc_ctl disable 1034 align_free(data_desc); 1035 align_free(aad_desc); 1036 if (dma_in != in) 1037 align_free(dma_in); 1038 if (out && dma_out != out) 1039 align_free(dma_out); 1040 if (aad && aad != aad_tmp) 1041 align_free(aad_tmp); 1042 1043 return ret; 1044 } 1045 1046 static int hw_aes_init(u32 chn, const u8 *key, const u8 *twk_key, u32 key_len, 1047 const u8 *iv, u32 iv_len, u32 mode, bool enc) 1048 { 1049 u32 rk_mode = RK_GET_RK_MODE(mode); 1050 1051 if (rk_mode > RK_MODE_XTS) 1052 return -EINVAL; 1053 1054 if (iv_len > AES_BLOCK_SIZE) 1055 return -EINVAL; 1056 1057 if (IS_NEED_IV(rk_mode)) { 1058 if (!iv || iv_len != AES_BLOCK_SIZE) 1059 return -EINVAL; 1060 } else { 1061 iv_len = 0; 1062 } 1063 1064 if (rk_mode == RK_MODE_XTS) { 1065 if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_256) 1066 return -EINVAL; 1067 1068 if (!key || !twk_key) 1069 return -EINVAL; 1070 } else { 1071 if (key_len != AES_KEYSIZE_128 && 1072 key_len != AES_KEYSIZE_192 && 1073 key_len != AES_KEYSIZE_256) 1074 return -EINVAL; 1075 } 1076 1077 return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len, 1078 CRYPTO_AES, mode, enc); 1079 } 1080 1081 static int hw_sm4_init(u32 chn, const u8 *key, const u8 *twk_key, u32 key_len, 1082 const u8 *iv, u32 iv_len, u32 mode, bool enc) 1083 { 1084 u32 rk_mode = RK_GET_RK_MODE(mode); 1085 1086 if (rk_mode > RK_MODE_XTS) 1087 return -EINVAL; 1088 1089 if (iv_len > SM4_BLOCK_SIZE || key_len != SM4_KEYSIZE) 1090 return -EINVAL; 1091 1092 if (IS_NEED_IV(rk_mode)) { 1093 if (!iv || iv_len != SM4_BLOCK_SIZE) 1094 return -EINVAL; 1095 } else { 1096 iv_len = 0; 1097 } 1098 1099 if (rk_mode == RK_MODE_XTS) { 1100 if (!key || !twk_key) 1101 return -EINVAL; 1102 } 1103 1104 return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len, 1105 CRYPTO_SM4, mode, enc); 1106 } 1107 1108 int rk_crypto_des(struct udevice *dev, u32 mode, const u8 *key, u32 key_len, 1109 const u8 *iv, const u8 *in, u8 *out, u32 len, bool enc) 1110 { 1111 u32 rk_mode = RK_GET_RK_MODE(mode); 1112 u8 tmp_key[24]; 1113 int ret; 1114 1115 if (!is_des_mode(rk_mode)) 1116 return -EINVAL; 1117 1118 if (key_len == DES_BLOCK_SIZE || key_len == 3 * DES_BLOCK_SIZE) { 1119 memcpy(tmp_key, key, key_len); 1120 } else if (key_len == 2 * DES_BLOCK_SIZE) { 1121 memcpy(tmp_key, key, 16); 1122 memcpy(tmp_key + 16, key, 8); 1123 key_len = 3 * DES_BLOCK_SIZE; 1124 } else { 1125 return -EINVAL; 1126 } 1127 1128 ret = hw_cipher_init(0, tmp_key, NULL, key_len, iv, DES_BLOCK_SIZE, 1129 CRYPTO_DES, mode, enc); 1130 if (ret) 1131 goto exit; 1132 1133 ret = hw_cipher_crypt(in, out, len, NULL, 0, 1134 NULL, 0, mode); 1135 1136 exit: 1137 return ret; 1138 } 1139 1140 int rk_crypto_aes(struct udevice *dev, u32 mode, 1141 const u8 *key, const u8 *twk_key, u32 key_len, 1142 const u8 *iv, u32 iv_len, 1143 const u8 *in, u8 *out, u32 len, bool enc) 1144 { 1145 int ret; 1146 1147 /* RV1126/RV1109 do not support aes-192 */ 1148 #if defined(CONFIG_ROCKCHIP_RV1126) 1149 if (key_len == AES_KEYSIZE_192) 1150 return -EINVAL; 1151 #endif 1152 1153 ret = hw_aes_init(0, key, twk_key, key_len, iv, iv_len, mode, enc); 1154 if (ret) 1155 return ret; 1156 1157 return hw_cipher_crypt(in, out, len, NULL, 0, 1158 NULL, 0, mode); 1159 } 1160 1161 int rk_crypto_sm4(struct udevice *dev, u32 mode, 1162 const u8 *key, const u8 *twk_key, u32 key_len, 1163 const u8 *iv, u32 iv_len, 1164 const u8 *in, u8 *out, u32 len, bool enc) 1165 { 1166 int ret; 1167 1168 ret = hw_sm4_init(0, key, twk_key, key_len, iv, iv_len, mode, enc); 1169 if (ret) 1170 return ret; 1171 1172 return hw_cipher_crypt(in, out, len, NULL, 0, NULL, 0, mode); 1173 } 1174 1175 int rockchip_crypto_cipher(struct udevice *dev, cipher_context *ctx, 1176 const u8 *in, u8 *out, u32 len, bool enc) 1177 { 1178 switch (ctx->algo) { 1179 case CRYPTO_DES: 1180 return rk_crypto_des(dev, ctx->mode, ctx->key, ctx->key_len, 1181 ctx->iv, in, out, len, enc); 1182 case CRYPTO_AES: 1183 return rk_crypto_aes(dev, ctx->mode, 1184 ctx->key, ctx->twk_key, ctx->key_len, 1185 ctx->iv, ctx->iv_len, in, out, len, enc); 1186 case CRYPTO_SM4: 1187 return rk_crypto_sm4(dev, ctx->mode, 1188 ctx->key, ctx->twk_key, ctx->key_len, 1189 ctx->iv, ctx->iv_len, in, out, len, enc); 1190 default: 1191 return -EINVAL; 1192 } 1193 } 1194 1195 int rk_crypto_mac(struct udevice *dev, u32 algo, u32 mode, 1196 const u8 *key, u32 key_len, 1197 const u8 *in, u32 len, u8 *tag) 1198 { 1199 u32 rk_mode = RK_GET_RK_MODE(mode); 1200 int ret; 1201 1202 if (!IS_MAC_MODE(rk_mode)) 1203 return -EINVAL; 1204 1205 if (algo != CRYPTO_AES && algo != CRYPTO_SM4) 1206 return -EINVAL; 1207 1208 /* RV1126/RV1109 do not support aes-192 */ 1209 #if defined(CONFIG_ROCKCHIP_RV1126) 1210 if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192) 1211 return -EINVAL; 1212 #endif 1213 1214 ret = hw_cipher_init(g_key_chn, key, NULL, key_len, NULL, 0, 1215 algo, mode, true); 1216 if (ret) 1217 return ret; 1218 1219 return hw_cipher_crypt(in, NULL, len, NULL, 0, 1220 tag, AES_BLOCK_SIZE, mode); 1221 } 1222 1223 int rockchip_crypto_mac(struct udevice *dev, cipher_context *ctx, 1224 const u8 *in, u32 len, u8 *tag) 1225 { 1226 return rk_crypto_mac(dev, ctx->algo, ctx->mode, 1227 ctx->key, ctx->key_len, in, len, tag); 1228 } 1229 1230 int rk_crypto_ae(struct udevice *dev, u32 algo, u32 mode, 1231 const u8 *key, u32 key_len, const u8 *nonce, u32 nonce_len, 1232 const u8 *in, u32 len, const u8 *aad, u32 aad_len, 1233 u8 *out, u8 *tag) 1234 { 1235 u32 rk_mode = RK_GET_RK_MODE(mode); 1236 int ret; 1237 1238 if (!IS_AE_MODE(rk_mode)) 1239 return -EINVAL; 1240 1241 if (algo != CRYPTO_AES && algo != CRYPTO_SM4) 1242 return -EINVAL; 1243 1244 /* RV1126/RV1109 do not support aes-192 */ 1245 #if defined(CONFIG_ROCKCHIP_RV1126) 1246 if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192) 1247 return -EINVAL; 1248 #endif 1249 1250 ret = hw_cipher_init(g_key_chn, key, NULL, key_len, nonce, nonce_len, 1251 algo, mode, true); 1252 if (ret) 1253 return ret; 1254 1255 return hw_cipher_crypt(in, out, len, aad, aad_len, 1256 tag, AES_BLOCK_SIZE, mode); 1257 } 1258 1259 int rockchip_crypto_ae(struct udevice *dev, cipher_context *ctx, 1260 const u8 *in, u32 len, const u8 *aad, u32 aad_len, 1261 u8 *out, u8 *tag) 1262 1263 { 1264 return rk_crypto_ae(dev, ctx->algo, ctx->mode, ctx->key, ctx->key_len, 1265 ctx->iv, ctx->iv_len, in, len, 1266 aad, aad_len, out, tag); 1267 } 1268 1269 #endif 1270 1271 #if CONFIG_IS_ENABLED(ROCKCHIP_RSA) 1272 static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx, 1273 u8 *sign, u8 *output) 1274 { 1275 struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL; 1276 struct mpa_num *mpa_c = NULL, *mpa_result = NULL; 1277 u32 n_bits, n_words; 1278 int ret; 1279 1280 if (!ctx) 1281 return -EINVAL; 1282 1283 if (ctx->algo != CRYPTO_RSA512 && 1284 ctx->algo != CRYPTO_RSA1024 && 1285 ctx->algo != CRYPTO_RSA2048 && 1286 ctx->algo != CRYPTO_RSA3072 && 1287 ctx->algo != CRYPTO_RSA4096) 1288 return -EINVAL; 1289 1290 n_bits = crypto_algo_nbits(ctx->algo); 1291 n_words = BITS2WORD(n_bits); 1292 1293 ret = rk_mpa_alloc(&mpa_m, sign, n_words); 1294 if (ret) 1295 goto exit; 1296 1297 ret = rk_mpa_alloc(&mpa_e, ctx->e, n_words); 1298 if (ret) 1299 goto exit; 1300 1301 ret = rk_mpa_alloc(&mpa_n, ctx->n, n_words); 1302 if (ret) 1303 goto exit; 1304 1305 if (ctx->c) { 1306 ret = rk_mpa_alloc(&mpa_c, ctx->c, n_words); 1307 if (ret) 1308 goto exit; 1309 } 1310 1311 ret = rk_mpa_alloc(&mpa_result, NULL, n_words); 1312 if (ret) 1313 goto exit; 1314 1315 ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result); 1316 if (!ret) 1317 memcpy(output, mpa_result->d, BITS2BYTE(n_bits)); 1318 1319 exit: 1320 rk_mpa_free(&mpa_m); 1321 rk_mpa_free(&mpa_e); 1322 rk_mpa_free(&mpa_n); 1323 rk_mpa_free(&mpa_c); 1324 rk_mpa_free(&mpa_result); 1325 1326 return ret; 1327 } 1328 #endif 1329 1330 static const struct dm_crypto_ops rockchip_crypto_ops = { 1331 .capability = rockchip_crypto_capability, 1332 .sha_init = rockchip_crypto_sha_init, 1333 .sha_update = rockchip_crypto_sha_update, 1334 .sha_final = rockchip_crypto_sha_final, 1335 #if CONFIG_IS_ENABLED(ROCKCHIP_RSA) 1336 .rsa_verify = rockchip_crypto_rsa_verify, 1337 #endif 1338 #if CONFIG_IS_ENABLED(ROCKCHIP_HMAC) 1339 .hmac_init = rockchip_crypto_hmac_init, 1340 .hmac_update = rockchip_crypto_hmac_update, 1341 .hmac_final = rockchip_crypto_hmac_final, 1342 #endif 1343 #if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER) 1344 .cipher_crypt = rockchip_crypto_cipher, 1345 .cipher_mac = rockchip_crypto_mac, 1346 .cipher_ae = rockchip_crypto_ae, 1347 #endif 1348 }; 1349 1350 /* 1351 * Only use "clocks" to parse crypto clock id and use rockchip_get_clk(). 1352 * Because we always add crypto node in U-Boot dts, when kernel dtb enabled : 1353 * 1354 * 1. There is cru phandle mismatch between U-Boot and kernel dtb; 1355 * 2. CONFIG_OF_SPL_REMOVE_PROPS removes clock property; 1356 */ 1357 static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev) 1358 { 1359 struct rockchip_crypto_priv *priv = dev_get_priv(dev); 1360 int len, ret = -EINVAL; 1361 1362 memset(priv, 0x00, sizeof(*priv)); 1363 1364 priv->reg = (fdt_addr_t)dev_read_addr_ptr(dev); 1365 if (priv->reg == FDT_ADDR_T_NONE) 1366 return -EINVAL; 1367 1368 crypto_base = priv->reg; 1369 1370 /* if there is no clocks in dts, just skip it */ 1371 if (!dev_read_prop(dev, "clocks", &len)) { 1372 printf("Can't find \"clocks\" property\n"); 1373 return 0; 1374 } 1375 1376 memset(priv, 0x00, sizeof(*priv)); 1377 priv->clocks = malloc(len); 1378 if (!priv->clocks) 1379 return -ENOMEM; 1380 1381 priv->nclocks = len / sizeof(u32); 1382 if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks, 1383 priv->nclocks)) { 1384 printf("Can't read \"clocks\" property\n"); 1385 ret = -EINVAL; 1386 goto exit; 1387 } 1388 1389 if (!dev_read_prop(dev, "clock-frequency", &len)) { 1390 printf("Can't find \"clock-frequency\" property\n"); 1391 ret = -EINVAL; 1392 goto exit; 1393 } 1394 1395 priv->frequencies = malloc(len); 1396 if (!priv->frequencies) { 1397 ret = -ENOMEM; 1398 goto exit; 1399 } 1400 1401 priv->nclocks = len / sizeof(u32); 1402 if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies, 1403 priv->nclocks)) { 1404 printf("Can't read \"clock-frequency\" property\n"); 1405 ret = -EINVAL; 1406 goto exit; 1407 } 1408 1409 return 0; 1410 exit: 1411 if (priv->clocks) 1412 free(priv->clocks); 1413 1414 if (priv->frequencies) 1415 free(priv->frequencies); 1416 1417 return ret; 1418 } 1419 1420 static int rk_crypto_set_clk(struct udevice *dev) 1421 { 1422 struct rockchip_crypto_priv *priv = dev_get_priv(dev); 1423 struct clk clk; 1424 int i, ret; 1425 1426 if (!priv->clocks && priv->nclocks == 0) 1427 return 0; 1428 1429 for (i = 0; i < priv->nclocks; i++) { 1430 ret = clk_get_by_index(dev, i, &clk); 1431 if (ret < 0) { 1432 printf("Failed to get clk index %d, ret=%d\n", i, ret); 1433 return ret; 1434 } 1435 ret = clk_set_rate(&clk, priv->frequencies[i]); 1436 if (ret < 0) { 1437 printf("%s: Failed to set clk(%ld): ret=%d\n", 1438 __func__, clk.id, ret); 1439 return ret; 1440 } 1441 } 1442 1443 return 0; 1444 } 1445 1446 static int rockchip_crypto_probe(struct udevice *dev) 1447 { 1448 struct rockchip_crypto_priv *priv = dev_get_priv(dev); 1449 struct rk_crypto_soc_data *sdata; 1450 int ret = 0; 1451 1452 sdata = (struct rk_crypto_soc_data *)dev_get_driver_data(dev); 1453 1454 if (sdata->dynamic_cap) 1455 sdata->capability = sdata->dynamic_cap(); 1456 1457 priv->soc_data = sdata; 1458 1459 priv->hw_ctx = memalign(LLI_ADDR_ALIGN_SIZE, 1460 sizeof(struct rk_hash_ctx)); 1461 if (!priv->hw_ctx) 1462 return -ENOMEM; 1463 1464 ret = rk_crypto_set_clk(dev); 1465 if (ret) 1466 return ret; 1467 1468 hw_crypto_reset(); 1469 1470 return 0; 1471 } 1472 1473 static const struct rk_crypto_soc_data soc_data_base = { 1474 .capability = CRYPTO_MD5 | 1475 CRYPTO_SHA1 | 1476 CRYPTO_SHA256 | 1477 CRYPTO_SHA512 | 1478 CRYPTO_HMAC_MD5 | 1479 CRYPTO_HMAC_SHA1 | 1480 CRYPTO_HMAC_SHA256 | 1481 CRYPTO_HMAC_SHA512 | 1482 CRYPTO_RSA512 | 1483 CRYPTO_RSA1024 | 1484 CRYPTO_RSA2048 | 1485 CRYPTO_RSA3072 | 1486 CRYPTO_RSA4096 | 1487 CRYPTO_DES | 1488 CRYPTO_AES, 1489 }; 1490 1491 static const struct rk_crypto_soc_data soc_data_base_sm = { 1492 .capability = CRYPTO_MD5 | 1493 CRYPTO_SHA1 | 1494 CRYPTO_SHA256 | 1495 CRYPTO_SHA512 | 1496 CRYPTO_SM3 | 1497 CRYPTO_HMAC_MD5 | 1498 CRYPTO_HMAC_SHA1 | 1499 CRYPTO_HMAC_SHA256 | 1500 CRYPTO_HMAC_SHA512 | 1501 CRYPTO_HMAC_SM3 | 1502 CRYPTO_RSA512 | 1503 CRYPTO_RSA1024 | 1504 CRYPTO_RSA2048 | 1505 CRYPTO_RSA3072 | 1506 CRYPTO_RSA4096 | 1507 CRYPTO_DES | 1508 CRYPTO_AES | 1509 CRYPTO_SM4, 1510 }; 1511 1512 static const struct rk_crypto_soc_data soc_data_rk1808 = { 1513 .capability = CRYPTO_MD5 | 1514 CRYPTO_SHA1 | 1515 CRYPTO_SHA256 | 1516 CRYPTO_HMAC_MD5 | 1517 CRYPTO_HMAC_SHA1 | 1518 CRYPTO_HMAC_SHA256 | 1519 CRYPTO_RSA512 | 1520 CRYPTO_RSA1024 | 1521 CRYPTO_RSA2048 | 1522 CRYPTO_RSA3072 | 1523 CRYPTO_RSA4096, 1524 }; 1525 1526 static const struct rk_crypto_soc_data soc_data_cryptov3 = { 1527 .capability = 0, 1528 .dynamic_cap = crypto_v3_dynamic_cap, 1529 }; 1530 1531 static const struct udevice_id rockchip_crypto_ids[] = { 1532 { 1533 .compatible = "rockchip,px30-crypto", 1534 .data = (ulong)&soc_data_base 1535 }, 1536 { 1537 .compatible = "rockchip,rk1808-crypto", 1538 .data = (ulong)&soc_data_rk1808 1539 }, 1540 { 1541 .compatible = "rockchip,rk3308-crypto", 1542 .data = (ulong)&soc_data_base 1543 }, 1544 { 1545 .compatible = "rockchip,rv1126-crypto", 1546 .data = (ulong)&soc_data_base_sm 1547 }, 1548 { 1549 .compatible = "rockchip,rk3568-crypto", 1550 .data = (ulong)&soc_data_base_sm 1551 }, 1552 { 1553 .compatible = "rockchip,rk3588-crypto", 1554 .data = (ulong)&soc_data_base_sm 1555 }, 1556 { 1557 .compatible = "rockchip,crypto_v3", 1558 .data = (ulong)&soc_data_cryptov3 1559 }, 1560 { } 1561 }; 1562 1563 U_BOOT_DRIVER(rockchip_crypto_v2) = { 1564 .name = "rockchip_crypto_v2", 1565 .id = UCLASS_CRYPTO, 1566 .of_match = rockchip_crypto_ids, 1567 .ops = &rockchip_crypto_ops, 1568 .probe = rockchip_crypto_probe, 1569 .ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata, 1570 .priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv), 1571 }; 1572