xref: /rk3399_rockchip-uboot/drivers/crypto/rockchip/crypto_v2.c (revision 386ae599c0eec847f4b4f44007d397a926dd4d6b)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <common.h>
#include <clk.h>
#include <crypto.h>
#include <dm.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <asm/arch/clock.h>
#include <rockchip/crypto_hash_cache.h>
#include <rockchip/crypto_v2.h>
#include <rockchip/crypto_v2_pka.h>

#define	RK_HASH_CTX_MAGIC		0x1A1A1A1A

#ifdef DEBUG
#define IMSG(format, ...) printf("[%s, %05d]-trace: " format "\n", \
				 __func__, __LINE__, ##__VA_ARGS__)
#else
#define IMSG(format, ...)
#endif

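/*
 * Hardware DMA link-list descriptor, walked by the engine starting from
 * the address written to CRYPTO_DMA_LLI_ADDR: src_addr/src_len describe
 * the input buffer, dst_addr/dst_len the output buffer (0 for hash/MAC),
 * user_define carries the LLI_USER_* start/last flags plus the key
 * channel, dma_ctrl the LLI_DMA_CTRL_* interrupt/pause controls, and
 * next_addr chains to the next descriptor.
 */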
struct crypto_lli_desc {
	u32 src_addr;
	u32 src_len;
	u32 dst_addr;
	u32 dst_len;
	u32 user_define;
	u32 reserve;
	u32 dma_ctrl;
	u32 next_addr;
};

struct rk_hash_ctx {
	struct crypto_lli_desc		data_lli;	/* lli desc */
	struct crypto_hash_cache	*hash_cache;
	u32				magic;		/* to check ctx */
	u32				algo;		/* hash algo */
	u8				digest_size;	/* hash out length */
	u8				reserved[3];
};

struct rk_crypto_soc_data {
	u32 capability;
	u32 (*dynamic_cap)(void);
};

struct rockchip_crypto_priv {
	fdt_addr_t			reg;
	u32				frequency;
	char				*clocks;
	u32				*frequencies;
	u32				nclocks;
	u32				length;
	struct rk_hash_ctx		*hw_ctx;
	struct rk_crypto_soc_data	*soc_data;
};

#define LLI_ADDR_ALIGN_SIZE	8
#define DATA_ADDR_ALIGN_SIZE	8
#define DATA_LEN_ALIGN_SIZE	64
/* crypto timeout is 500ms; cap each update at 32M so one pass fits the timeout */
#define HASH_UPDATE_LIMIT	(32 * 1024 * 1024)
#define RK_CRYPTO_TIMEOUT	500000

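/*
 * Poll until @condition goes false or roughly @timeout microseconds
 * elapse; evaluates to 0 on success and -ETIMEDOUT on timeout, e.g.
 *
 *	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL), RK_CRYPTO_TIMEOUT);
 */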
#define RK_POLL_TIMEOUT(condition, timeout) \
({ \
	int time_out = timeout; \
	while (condition) { \
		if (--time_out <= 0) { \
			debug("[%s] %d: time out!\n", __func__,\
				__LINE__); \
			break; \
		} \
		udelay(1); \
	} \
	(time_out <= 0) ? -ETIMEDOUT : 0; \
})

#define WAIT_TAG_VALID(channel, timeout) ({ \
	u32 tag_mask = CRYPTO_CH0_TAG_VALID << (channel);\
	int ret;\
	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_TAG_VALID) & tag_mask),\
			      timeout);\
	crypto_write(crypto_read(CRYPTO_TAG_VALID) & tag_mask, CRYPTO_TAG_VALID);\
	ret;\
})

#define virt_to_phys(addr)		(((unsigned long)addr) & 0xffffffff)
#define phys_to_virt(addr, area)	((unsigned long)addr)

#define align_malloc(bytes, alignment)	memalign(alignment, bytes)
#define align_free(addr)		do { if (addr) free(addr); } while (0)

#define ROUNDUP(size, alignment)	round_up(size, alignment)
#define cache_op_inner(type, addr, size) \
					crypto_flush_cacheline((ulong)addr, size)

#define IS_NEED_IV(rk_mode) ((rk_mode) != RK_MODE_ECB && \
			     (rk_mode) != RK_MODE_CMAC && \
			     (rk_mode) != RK_MODE_CBC_MAC)

#define IS_NEED_TAG(rk_mode) ((rk_mode) == RK_MODE_CMAC || \
			      (rk_mode) == RK_MODE_CBC_MAC || \
			      (rk_mode) == RK_MODE_CCM || \
			      (rk_mode) == RK_MODE_GCM)

#define IS_MAC_MODE(rk_mode) ((rk_mode) == RK_MODE_CMAC || \
			      (rk_mode) == RK_MODE_CBC_MAC)

#define IS_AE_MODE(rk_mode) ((rk_mode) == RK_MODE_CCM || \
			     (rk_mode) == RK_MODE_GCM)

fdt_addr_t crypto_base;

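/*
 * Big-endian packing helpers for the 32-bit registers, e.g. the bytes
 * {0x12, 0x34, 0x56, 0x78} correspond to the word 0x12345678.
 */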
static inline void word2byte_be(u32 word, u8 *ch)
{
	ch[0] = (word >> 24) & 0xff;
	ch[1] = (word >> 16) & 0xff;
	ch[2] = (word >> 8) & 0xff;
	ch[3] = (word >> 0) & 0xff;
}

static inline u32 byte2word_be(const u8 *ch)
{
	return (*ch << 24) + (*(ch + 1) << 16) + (*(ch + 2) << 8) + *(ch + 3);
}

static inline void clear_regs(u32 base, u32 words)
{
	int i;

	/* zero the register block */
	for (i = 0; i < words; i++)
		crypto_write(0, base + 4 * i);
}

static inline void clear_hash_out_reg(void)
{
	clear_regs(CRYPTO_HASH_DOUT_0, 16);
}

static inline void clear_key_regs(void)
{
	clear_regs(CRYPTO_CH0_KEY_0, CRYPTO_KEY_CHANNEL_NUM * 4);
}

static inline void read_regs(u32 base, u8 *data, u32 data_len)
{
	u8 tmp_buf[4];
	u32 i;

	for (i = 0; i < data_len / 4; i++)
		word2byte_be(crypto_read(base + i * 4),
			     data + i * 4);

	if (data_len % 4) {
		word2byte_be(crypto_read(base + i * 4), tmp_buf);
		memcpy(data + i * 4, tmp_buf, data_len % 4);
	}
}

static inline void write_regs(u32 base, const u8 *data, u32 data_len)
{
	u8 tmp_buf[4];
	u32 i;

	for (i = 0; i < data_len / 4; i++, base += 4)
		crypto_write(byte2word_be(data + i * 4), base);

	if (data_len % 4) {
		memset(tmp_buf, 0x00, sizeof(tmp_buf));
		memcpy((u8 *)tmp_buf, data + i * 4, data_len % 4);
		crypto_write(byte2word_be(tmp_buf), base);
	}
}

static inline void write_key_reg(u32 chn, const u8 *key, u32 key_len)
{
	write_regs(CRYPTO_CH0_KEY_0 + chn * 0x10, key, key_len);
}

static inline void set_iv_reg(u32 chn, const u8 *iv, u32 iv_len)
{
	u32 base_iv;

	base_iv = CRYPTO_CH0_IV_0 + chn * 0x10;

	/* clear iv */
	clear_regs(base_iv, 4);

	if (!iv || iv_len == 0)
		return;

	write_regs(base_iv, iv, iv_len);

	crypto_write(iv_len, CRYPTO_CH0_IV_LEN_0 + 4 * chn);
}

static inline void get_iv_reg(u32 chn, u8 *iv, u32 iv_len)
{
	u32 base_iv;

	base_iv = CRYPTO_CH0_IV_0 + chn * 0x10;

	read_regs(base_iv, iv, iv_len);
}

static inline void get_tag_from_reg(u32 chn, u8 *tag, u32 tag_len)
{
	u32 i;
	u32 chn_base = CRYPTO_CH0_TAG_0 + 0x10 * chn;

	for (i = 0; i < tag_len / 4; i++, chn_base += 4)
		word2byte_be(crypto_read(chn_base), tag + 4 * i);
}

static u32 crypto_v3_dynamic_cap(void)
{
	u32 capability = 0;
	u32 ver_reg, i;
	struct cap_map {
		u32 ver_offset;
		u32 mask;
		u32 cap_bit;
	};
	const struct cap_map cap_tbl[] = {
	{CRYPTO_HASH_VERSION, CRYPTO_HASH_MD5_FLAG,    CRYPTO_MD5},
	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA1_FLAG,   CRYPTO_SHA1},
	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA256_FLAG, CRYPTO_SHA256},
	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA512_FLAG, CRYPTO_SHA512},
	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SM3_FLAG,    CRYPTO_SM3},

	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_MD5_FLAG,    CRYPTO_HMAC_MD5},
	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA1_FLAG,   CRYPTO_HMAC_SHA1},
	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA256_FLAG, CRYPTO_HMAC_SHA256},
	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA512_FLAG, CRYPTO_HMAC_SHA512},
	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SM3_FLAG,    CRYPTO_HMAC_SM3},

	{CRYPTO_AES_VERSION,  CRYPTO_AES256_FLAG,      CRYPTO_AES},
	{CRYPTO_DES_VERSION,  CRYPTO_TDES_FLAG,        CRYPTO_DES},
	{CRYPTO_SM4_VERSION,  CRYPTO_ECB_FLAG,         CRYPTO_SM4},
	};

	/* rsa */
	capability = CRYPTO_RSA512 |
		     CRYPTO_RSA1024 |
		     CRYPTO_RSA2048 |
		     CRYPTO_RSA3072 |
		     CRYPTO_RSA4096;

	for (i = 0; i < ARRAY_SIZE(cap_tbl); i++) {
		ver_reg = crypto_read(cap_tbl[i].ver_offset);

		if ((ver_reg & cap_tbl[i].mask) == cap_tbl[i].mask)
			capability |= cap_tbl[i].cap_bit;
	}

	return capability;
}

static int hw_crypto_reset(void)
{
	u32 val = 0, mask = 0;
	int ret;

	val = CRYPTO_SW_PKA_RESET | CRYPTO_SW_CC_RESET;
	mask = val << CRYPTO_WRITE_MASK_SHIFT;

	/* reset pka and crypto modules */
	crypto_write(val | mask, CRYPTO_RST_CTL);

	/* wait for reset to complete */
	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL), RK_CRYPTO_TIMEOUT);

	return ret;
}

static void hw_hash_clean_ctx(struct rk_hash_ctx *ctx)
{
	/* clear hash status */
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

	assert(ctx);
	assert(ctx->magic == RK_HASH_CTX_MAGIC);

	crypto_hash_cache_free(ctx->hash_cache);

	memset(ctx, 0x00, sizeof(*ctx));
}

static int rk_hash_init(void *hw_ctx, u32 algo)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)hw_ctx;
	u32 reg_ctrl = 0;
	int ret;

	if (!tmp_ctx)
		return -EINVAL;

	reg_ctrl = CRYPTO_SW_CC_RESET;
	crypto_write(reg_ctrl | (reg_ctrl << CRYPTO_WRITE_MASK_SHIFT),
		     CRYPTO_RST_CTL);

	/* wait for reset to complete */
	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL),
			      RK_CRYPTO_TIMEOUT);
	if (ret)
		return ret;

	reg_ctrl = 0;
	tmp_ctx->algo = algo;
	switch (algo) {
	case CRYPTO_MD5:
	case CRYPTO_HMAC_MD5:
		reg_ctrl |= CRYPTO_MODE_MD5;
		tmp_ctx->digest_size = 16;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_HMAC_SHA1:
		reg_ctrl |= CRYPTO_MODE_SHA1;
		tmp_ctx->digest_size = 20;
		break;
	case CRYPTO_SHA256:
	case CRYPTO_HMAC_SHA256:
		reg_ctrl |= CRYPTO_MODE_SHA256;
		tmp_ctx->digest_size = 32;
		break;
	case CRYPTO_SHA512:
	case CRYPTO_HMAC_SHA512:
		reg_ctrl |= CRYPTO_MODE_SHA512;
		tmp_ctx->digest_size = 64;
		break;
	case CRYPTO_SM3:
	case CRYPTO_HMAC_SM3:
		reg_ctrl |= CRYPTO_MODE_SM3;
		tmp_ctx->digest_size = 32;
		break;
	default:
		ret = -EINVAL;
		goto exit;
	}

	clear_hash_out_reg();

	/* enable hardware padding */
	reg_ctrl |= CRYPTO_HW_PAD_ENABLE;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL);

	/* FIFO input and output data byte swap */
	/* such as B0, B1, B2, B3 -> B3, B2, B1, B0 */
	reg_ctrl = CRYPTO_DOUT_BYTESWAP | CRYPTO_DOIN_BYTESWAP;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_FIFO_CTL);

	/* mask all DMA interrupts; completion is polled via CRYPTO_DMA_INT_ST */
	crypto_write(0, CRYPTO_DMA_INT_EN);

	tmp_ctx->magic = RK_HASH_CTX_MAGIC;

	return 0;
exit:
	/* clear hash setting if init failed */
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

	return ret;
}

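/*
 * Feed one chunk to the hash DMA. Intermediate chunks link the
 * descriptor back to itself and pause the engine, so the next call only
 * has to issue CRYPTO_DMA_RESTART; the final chunk sets the STRING_LAST
 * flags so the hardware applies padding and closes the digest.
 */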
static int rk_hash_direct_calc(void *hw_data, const u8 *data,
			       u32 data_len, u8 *started_flag, u8 is_last)
{
	struct rockchip_crypto_priv *priv = hw_data;
	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;
	struct crypto_lli_desc *lli = &hash_ctx->data_lli;
	int ret = -EINVAL;
	u32 tmp = 0, mask = 0;

	assert(IS_ALIGNED((ulong)data, DATA_ADDR_ALIGN_SIZE));
	assert(is_last || IS_ALIGNED(data_len, DATA_LEN_ALIGN_SIZE));

	debug("%s: data = %p, len = %u, s = %x, l = %x\n",
	      __func__, data, data_len, *started_flag, is_last);

	memset(lli, 0x00, sizeof(*lli));
	lli->src_addr = (u32)virt_to_phys(data);
	lli->src_len = data_len;
	lli->dma_ctrl = LLI_DMA_CTRL_SRC_DONE;

	if (is_last) {
		lli->user_define |= LLI_USER_STRING_LAST;
		lli->dma_ctrl |= LLI_DMA_CTRL_LAST;
	} else {
		lli->next_addr = (u32)virt_to_phys(lli);
		lli->dma_ctrl |= LLI_DMA_CTRL_PAUSE;
	}

	if (!(*started_flag)) {
		lli->user_define |=
			(LLI_USER_STRING_START | LLI_USER_CIPHER_START);
		crypto_write((u32)virt_to_phys(lli), CRYPTO_DMA_LLI_ADDR);
		crypto_write((CRYPTO_HASH_ENABLE << CRYPTO_WRITE_MASK_SHIFT) |
			     CRYPTO_HASH_ENABLE, CRYPTO_HASH_CTL);
		tmp = CRYPTO_DMA_START;
		*started_flag = 1;
	} else {
		tmp = CRYPTO_DMA_RESTART;
	}

	/* flush cache */
	crypto_flush_cacheline((ulong)lli, sizeof(*lli));
	crypto_flush_cacheline((ulong)data, data_len);

	/* start calculate */
	crypto_write(tmp << CRYPTO_WRITE_MASK_SHIFT | tmp,
		     CRYPTO_DMA_CTL);

	/* mask CRYPTO_SYNC_LOCKSTEP_INT_ST flag */
	mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST);

	/* wait calc ok */
	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask),
			      RK_CRYPTO_TIMEOUT);

	/* clear interrupt status */
	tmp = crypto_read(CRYPTO_DMA_INT_ST);
	crypto_write(tmp, CRYPTO_DMA_INT_ST);

	if ((tmp & mask) != CRYPTO_SRC_ITEM_DONE_INT_ST &&
	    (tmp & mask) != CRYPTO_ZERO_LEN_INT_ST) {
		ret = -EFAULT;
		debug("[%s] %d: CRYPTO_DMA_INT_ST = 0x%x\n",
		      __func__, __LINE__, tmp);
		goto exit;
	}

	priv->length += data_len;
exit:
	return ret;
}

int rk_hash_update(void *ctx, const u8 *data, u32 data_len)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
	int ret = -EINVAL;

	debug("\n");
	if (!tmp_ctx || !data)
		goto exit;

	if (tmp_ctx->digest_size == 0 || tmp_ctx->magic != RK_HASH_CTX_MAGIC)
		goto exit;

	ret = crypto_hash_update_with_cache(tmp_ctx->hash_cache,
					    data, data_len);

exit:
	/* on error, release the hash cache and scrub the context */
	if (ret && tmp_ctx)
		hw_hash_clean_ctx(tmp_ctx);

	return ret;
}

int rk_hash_final(void *ctx, u8 *digest, size_t len)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
	int ret = -EINVAL;

	if (!digest)
		goto exit;

	if (!tmp_ctx ||
	    tmp_ctx->digest_size == 0 ||
	    len > tmp_ctx->digest_size ||
	    tmp_ctx->magic != RK_HASH_CTX_MAGIC) {
		goto exit;
	}

	/* wait hash value ok */
	ret = RK_POLL_TIMEOUT(!crypto_read(CRYPTO_HASH_VALID),
			      RK_CRYPTO_TIMEOUT);

	read_regs(CRYPTO_HASH_DOUT_0, digest, len);

	/* clear hash status */
	crypto_write(CRYPTO_HASH_IS_VALID, CRYPTO_HASH_VALID);
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

exit:

	return ret;
}
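
/*
 * Sketch of how the hash primitives above are driven (the digest length
 * must match the algorithm, e.g. 32 bytes for SHA-256):
 *
 *	rk_hash_init(ctx, CRYPTO_SHA256);
 *	rk_hash_update(ctx, data, data_len);
 *	rk_hash_final(ctx, digest, 32);
 */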

static u32 rockchip_crypto_capability(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 capability, mask = 0;

	capability = priv->soc_data->capability;

#if !(CONFIG_IS_ENABLED(ROCKCHIP_CIPHER))
	mask |= (CRYPTO_DES | CRYPTO_AES | CRYPTO_SM4);
#endif

#if !(CONFIG_IS_ENABLED(ROCKCHIP_HMAC))
	mask |= (CRYPTO_HMAC_MD5 | CRYPTO_HMAC_SHA1 | CRYPTO_HMAC_SHA256 |
		 CRYPTO_HMAC_SHA512 | CRYPTO_HMAC_SM3);
#endif

#if !(CONFIG_IS_ENABLED(ROCKCHIP_RSA))
	mask |= (CRYPTO_RSA512 | CRYPTO_RSA1024 | CRYPTO_RSA2048 |
		 CRYPTO_RSA3072 | CRYPTO_RSA4096);
#endif

	return capability & (~mask);
}

static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;

	if (!ctx)
		return -EINVAL;

	memset(hash_ctx, 0x00, sizeof(*hash_ctx));

	priv->length = 0;

	hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
						       priv, ctx->length,
						       DATA_ADDR_ALIGN_SIZE,
						       DATA_LEN_ALIGN_SIZE);
	if (!hash_ctx->hash_cache)
		return -EFAULT;

	return rk_hash_init(hash_ctx, ctx->algo);
}

static int rockchip_crypto_sha_update(struct udevice *dev,
				      u32 *input, u32 len)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int ret = 0, i;
	u8 *p;

	if (!len)
		return -EINVAL;

	p = (u8 *)input;

	for (i = 0; i < len / HASH_UPDATE_LIMIT; i++, p += HASH_UPDATE_LIMIT) {
		ret = rk_hash_update(priv->hw_ctx, p, HASH_UPDATE_LIMIT);
		if (ret)
			goto exit;
	}

	if (len % HASH_UPDATE_LIMIT)
		ret = rk_hash_update(priv->hw_ctx, p, len % HASH_UPDATE_LIMIT);

exit:
	return ret;
}

static int rockchip_crypto_sha_final(struct udevice *dev,
				     sha_context *ctx, u8 *output)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 nbits;
	int ret;

	nbits = crypto_algo_nbits(ctx->algo);

	if (priv->length != ctx->length) {
		printf("total length(0x%08x) != init length(0x%08x)!\n",
		       priv->length, ctx->length);
		ret = -EIO;
		goto exit;
	}

	ret = rk_hash_final(priv->hw_ctx, (u8 *)output, BITS2BYTE(nbits));

exit:
	hw_hash_clean_ctx(priv->hw_ctx);
	return ret;
}

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
int rk_hmac_init(void *hw_ctx, u32 algo, u8 *key, u32 key_len)
{
	u32 reg_ctrl = 0;
	int ret;

	if (!key || !key_len || key_len > 64)
		return -EINVAL;

	clear_key_regs();

	write_key_reg(0, key, key_len);

	ret = rk_hash_init(hw_ctx, algo);
	if (ret)
		return ret;

	reg_ctrl = crypto_read(CRYPTO_HASH_CTL) | CRYPTO_HMAC_ENABLE;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL);

	return ret;
}

static int rockchip_crypto_hmac_init(struct udevice *dev,
				     sha_context *ctx, u8 *key, u32 key_len)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;

	if (!ctx)
		return -EINVAL;

	memset(hash_ctx, 0x00, sizeof(*hash_ctx));

	priv->length = 0;

	hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
						       priv, ctx->length,
						       DATA_ADDR_ALIGN_SIZE,
						       DATA_LEN_ALIGN_SIZE);
	if (!hash_ctx->hash_cache)
		return -EFAULT;

	return rk_hmac_init(priv->hw_ctx, ctx->algo, key, key_len);
}

static int rockchip_crypto_hmac_update(struct udevice *dev,
				       u32 *input, u32 len)
{
	return rockchip_crypto_sha_update(dev, input, len);
}

static int rockchip_crypto_hmac_final(struct udevice *dev,
				      sha_context *ctx, u8 *output)
{
	return rockchip_crypto_sha_final(dev, ctx, output);
}

#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
static u8 g_key_chn;

static const u32 rk_mode2bc_mode[RK_MODE_MAX] = {
	[RK_MODE_ECB] = CRYPTO_BC_ECB,
	[RK_MODE_CBC] = CRYPTO_BC_CBC,
	[RK_MODE_CTS] = CRYPTO_BC_CTS,
	[RK_MODE_CTR] = CRYPTO_BC_CTR,
	[RK_MODE_CFB] = CRYPTO_BC_CFB,
	[RK_MODE_OFB] = CRYPTO_BC_OFB,
	[RK_MODE_XTS] = CRYPTO_BC_XTS,
	[RK_MODE_CCM] = CRYPTO_BC_CCM,
	[RK_MODE_GCM] = CRYPTO_BC_GCM,
	[RK_MODE_CMAC] = CRYPTO_BC_CMAC,
	[RK_MODE_CBC_MAC] = CRYPTO_BC_CBC_MAC,
};

static inline void set_pc_len_reg(u32 chn, u64 pc_len)
{
	u32 chn_base = CRYPTO_CH0_PC_LEN_0 + chn * 0x08;

	crypto_write(pc_len & 0xffffffff, chn_base);
	crypto_write(pc_len >> 32, chn_base + 4);
}

static inline void set_aad_len_reg(u32 chn, u64 pc_len)
{
	u32 chn_base = CRYPTO_CH0_AAD_LEN_0 + chn * 0x08;

	crypto_write(pc_len & 0xffffffff, chn_base);
	crypto_write(pc_len >> 32, chn_base + 4);
}

static inline bool is_des_mode(u32 rk_mode)
{
	return (rk_mode == RK_MODE_ECB ||
		rk_mode == RK_MODE_CBC ||
		rk_mode == RK_MODE_CFB ||
		rk_mode == RK_MODE_OFB);
}

static void dump_crypto_state(struct crypto_lli_desc *desc,
			      u32 tmp, u32 expt_int,
			      const u8 *in, const u8 *out,
			      u32 len, int ret)
{
	IMSG("%s\n", ret == -ETIMEDOUT ? "timeout" : "mismatch");

	IMSG("CRYPTO_DMA_INT_ST = %08x, expect_int = %08x\n",
	     tmp, expt_int);
	IMSG("data desc		= %p\n", desc);
	IMSG("\taddr_in		= [%08x <=> %08x]\n",
	     desc->src_addr, (u32)virt_to_phys(in));
	IMSG("\taddr_out	= [%08x <=> %08x]\n",
	     desc->dst_addr, (u32)virt_to_phys(out));
	IMSG("\tsrc_len		= [%08x <=> %08x]\n",
	     desc->src_len, (u32)len);
	IMSG("\tdst_len		= %08x\n", desc->dst_len);
	IMSG("\tdma_ctl		= %08x\n", desc->dma_ctrl);
	IMSG("\tuser_define	= %08x\n", desc->user_define);

	IMSG("\n\nDMA CRYPTO_DMA_LLI_ADDR status = %08x\n",
	     crypto_read(CRYPTO_DMA_LLI_ADDR));
	IMSG("DMA CRYPTO_DMA_ST status = %08x\n",
	     crypto_read(CRYPTO_DMA_ST));
	IMSG("DMA CRYPTO_DMA_STATE status = %08x\n",
	     crypto_read(CRYPTO_DMA_STATE));
	IMSG("DMA CRYPTO_DMA_LLI_RADDR status = %08x\n",
	     crypto_read(CRYPTO_DMA_LLI_RADDR));
	IMSG("DMA CRYPTO_DMA_SRC_RADDR status = %08x\n",
	     crypto_read(CRYPTO_DMA_SRC_RADDR));
	IMSG("DMA CRYPTO_DMA_DST_RADDR status = %08x\n",
	     crypto_read(CRYPTO_DMA_DST_RADDR));
	IMSG("DMA CRYPTO_CIPHER_ST status = %08x\n",
	     crypto_read(CRYPTO_CIPHER_ST));
	IMSG("DMA CRYPTO_CIPHER_STATE status = %08x\n",
	     crypto_read(CRYPTO_CIPHER_STATE));
	IMSG("DMA CRYPTO_TAG_VALID status = %08x\n",
	     crypto_read(CRYPTO_TAG_VALID));
	IMSG("LOCKSTEP status = %08x\n\n",
	     crypto_read(0x618));

	IMSG("dst %d bytes not transferred\n",
	     desc->dst_addr + desc->dst_len -
	     crypto_read(CRYPTO_DMA_DST_RADDR));
}

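/*
 * Load the CCM B0/counter block derived from the nonce, following
 * RFC 3610: byte 0 holds the flags (here only q - 1, the width of the
 * length field, in bits 0..2, with q = 15 - nlen), bytes 1..nlen hold
 * the nonce, and the trailing bytes stay zero.
 */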
static int ccm128_set_iv_reg(u32 chn, const u8 *nonce, u32 nlen)
{
	u8 iv_buf[AES_BLOCK_SIZE];
	u32 L;

	memset(iv_buf, 0x00, sizeof(iv_buf));

	L = 15 - nlen;
	iv_buf[0] = ((u8)(L - 1) & 7);

	/* the L parameter */
	L = iv_buf[0] & 7;

	/* nonce is too short */
	if (nlen < (14 - L))
		return -EINVAL;

	/* clear aad flag */
	iv_buf[0] &= ~0x40;
	memcpy(&iv_buf[1], nonce, 14 - L);

	set_iv_reg(chn, iv_buf, AES_BLOCK_SIZE);

	return 0;
}

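/*
 * Encode the associated-data length that CCM prepends to the AAD
 * (RFC 3610 / NIST SP 800-38C): lengths below 0xff00 use the 2-byte
 * big-endian form, longer ones the 6-byte form 0xff 0xfe || len32.
 */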
static void ccm_aad_padding(u32 aad_len, u8 *padding, u32 *padding_size)
{
	u32 i;

	if (aad_len == 0) {
		*padding_size = 0;
		return;
	}

	i = aad_len < (0x10000 - 0x100) ? 2 : 6;

	if (i == 2) {
		padding[0] = (u8)(aad_len >> 8);
		padding[1] = (u8)aad_len;
	} else {
		padding[0] = 0xFF;
		padding[1] = 0xFE;
		padding[2] = (u8)(aad_len >> 24);
		padding[3] = (u8)(aad_len >> 16);
		padding[4] = (u8)(aad_len >> 8);
		padding[5] = (u8)aad_len;
	}

	*padding_size = i;
}

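/*
 * Finish the B0 block started in ccm128_set_iv_reg(): fold the tag
 * length into flag bits 3..5 as (t - 2) / 2, store the 32-bit payload
 * length in the last four bytes, and set the Adata flag (0x40) when
 * AAD is present.
 */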
static int ccm_compose_aad_iv(u8 *aad_iv, u32 data_len, u32 aad_len, u32 tag_size)
{
	aad_iv[0] |= ((u8)(((tag_size - 2) / 2) & 7) << 3);

	aad_iv[12] = (u8)(data_len >> 24);
	aad_iv[13] = (u8)(data_len >> 16);
	aad_iv[14] = (u8)(data_len >> 8);
	aad_iv[15] = (u8)data_len;

	if (aad_len)
		aad_iv[0] |= 0x40;	/* set aad flag */

	return 0;
}

static int hw_cipher_init(u32 chn, const u8 *key, const u8 *twk_key,
			  u32 key_len, const u8 *iv, u32 iv_len,
			  u32 algo, u32 mode, bool enc)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);
	u32 key_chn_sel = chn;
	u32 reg_ctrl = 0;

	IMSG("%s: key addr is %p, key_len is %d, iv addr is %p",
	     __func__, key, key_len, iv);
	if (rk_mode >= RK_MODE_MAX)
		return -EINVAL;

	switch (algo) {
	case CRYPTO_DES:
		if (key_len > DES_BLOCK_SIZE)
			reg_ctrl |= CRYPTO_BC_TDES;
		else
			reg_ctrl |= CRYPTO_BC_DES;
		break;
	case CRYPTO_AES:
		reg_ctrl |= CRYPTO_BC_AES;
		break;
	case CRYPTO_SM4:
		reg_ctrl |= CRYPTO_BC_SM4;
		break;
	default:
		return -EINVAL;
	}

	if (algo == CRYPTO_AES || algo == CRYPTO_SM4) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			reg_ctrl |= CRYPTO_BC_128_bit_key;
			break;
		case AES_KEYSIZE_192:
			reg_ctrl |= CRYPTO_BC_192_bit_key;
			break;
		case AES_KEYSIZE_256:
			reg_ctrl |= CRYPTO_BC_256_bit_key;
			break;
		default:
			return -EINVAL;
		}
	}

	reg_ctrl |= rk_mode2bc_mode[rk_mode];
	if (!enc)
		reg_ctrl |= CRYPTO_BC_DECRYPT;

	/* write key data to reg */
	write_key_reg(key_chn_sel, key, key_len);

	/* write twk key for xts mode */
	if (rk_mode == RK_MODE_XTS)
		write_key_reg(key_chn_sel + 4, twk_key, key_len);

	/* set iv reg */
	if (rk_mode == RK_MODE_CCM)
		ccm128_set_iv_reg(chn, iv, iv_len);
	else
		set_iv_reg(chn, iv, iv_len);

	/* din_swap set 1, dout_swap set 1, default 1. */
	crypto_write(0x00030003, CRYPTO_FIFO_CTL);
	crypto_write(0, CRYPTO_DMA_INT_EN);

	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL);

	return 0;
}

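/*
 * One-shot DMA cipher operation: the input (and, for CCM/GCM, a
 * separate AAD descriptor) is staged through aligned bounce buffers
 * when the caller's buffers are unaligned, the DMA engine is kicked,
 * completion is polled via CRYPTO_DMA_INT_ST, and for the MAC/AEAD
 * modes the tag is read back from the tag registers.
 */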
static int hw_cipher_crypt(const u8 *in, u8 *out, u64 len,
			   const u8 *aad, u32 aad_len,
			   u8 *tag, u32 tag_len, u32 mode)
{
	struct crypto_lli_desc *data_desc = NULL, *aad_desc = NULL;
	u8 *dma_in = NULL, *dma_out = NULL, *aad_tmp = NULL;
	u32 rk_mode = RK_GET_RK_MODE(mode);
	u32 reg_ctrl = 0, tmp_len = 0;
	u32 expt_int = 0, mask = 0;
	u32 key_chn = g_key_chn;
	u32 tmp, dst_len = 0;
	int ret = -1;

	if (rk_mode == RK_MODE_CTS && len <= AES_BLOCK_SIZE) {
		printf("CTS mode requires more than 16 bytes, got %u\n",
		       (u32)len);
		return -EINVAL;
	}

	tmp_len = (rk_mode == RK_MODE_CTR) ? ROUNDUP(len, AES_BLOCK_SIZE) : len;

	data_desc = align_malloc(sizeof(*data_desc), LLI_ADDR_ALIGN_SIZE);
	if (!data_desc)
		goto exit;

	if (IS_ALIGNED((ulong)in, DATA_ADDR_ALIGN_SIZE) && tmp_len == len)
		dma_in = (void *)in;
	else
		dma_in = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE);
	if (!dma_in)
		goto exit;

	if (out) {
		if (IS_ALIGNED((ulong)out, DATA_ADDR_ALIGN_SIZE) &&
		    tmp_len == len)
			dma_out = out;
		else
			dma_out = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE);
		if (!dma_out)
			goto exit;
		dst_len = tmp_len;
	}

	memset(data_desc, 0x00, sizeof(*data_desc));
	if (dma_in != in)
		memcpy(dma_in, in, len);

	data_desc->src_addr    = (u32)virt_to_phys(dma_in);
	data_desc->src_len     = tmp_len;
	data_desc->dst_addr    = (u32)virt_to_phys(dma_out);
	data_desc->dst_len     = dst_len;
	data_desc->dma_ctrl    = LLI_DMA_CTRL_LAST;

	if (IS_MAC_MODE(rk_mode)) {
		expt_int = CRYPTO_LIST_DONE_INT_ST;
		data_desc->dma_ctrl |= LLI_DMA_CTRL_LIST_DONE;
	} else {
		expt_int = CRYPTO_DST_ITEM_DONE_INT_ST;
		data_desc->dma_ctrl |= LLI_DMA_CTRL_DST_DONE;
	}

	data_desc->user_define = LLI_USER_CIPHER_START |
				 LLI_USER_STRING_START |
				 LLI_USER_STRING_LAST |
				 (key_chn << 4);
	crypto_write((u32)virt_to_phys(data_desc), CRYPTO_DMA_LLI_ADDR);

	if (rk_mode == RK_MODE_CCM || rk_mode == RK_MODE_GCM) {
		u32 aad_tmp_len = 0;

		aad_desc = align_malloc(sizeof(*aad_desc), LLI_ADDR_ALIGN_SIZE);
		if (!aad_desc)
			goto exit;

		memset(aad_desc, 0x00, sizeof(*aad_desc));
		aad_desc->next_addr = (u32)virt_to_phys(data_desc);
		aad_desc->user_define = LLI_USER_CIPHER_START |
					 LLI_USER_STRING_START |
					 LLI_USER_STRING_LAST |
					 LLI_USER_STRING_AAD |
					 (key_chn << 4);

		if (rk_mode == RK_MODE_CCM) {
			u8 padding[AES_BLOCK_SIZE];
			u32 padding_size = 0;

			memset(padding, 0x00, sizeof(padding));
			ccm_aad_padding(aad_len, padding, &padding_size);

			aad_tmp_len = aad_len + AES_BLOCK_SIZE + padding_size;
			aad_tmp_len = ROUNDUP(aad_tmp_len, AES_BLOCK_SIZE);
			aad_tmp = align_malloc(aad_tmp_len,
					       DATA_ADDR_ALIGN_SIZE);
			if (!aad_tmp)
				goto exit;

			/* clear last block */
			memset(aad_tmp + aad_tmp_len - AES_BLOCK_SIZE,
			       0x00, AES_BLOCK_SIZE);

			/* read iv data from reg */
			get_iv_reg(key_chn, aad_tmp, AES_BLOCK_SIZE);
			ccm_compose_aad_iv(aad_tmp, tmp_len, aad_len, tag_len);
			memcpy(aad_tmp + AES_BLOCK_SIZE, padding, padding_size);

			memcpy(aad_tmp + AES_BLOCK_SIZE + padding_size,
			       aad, aad_len);
		} else {
			aad_tmp_len = aad_len;
			if (IS_ALIGNED((ulong)aad, DATA_ADDR_ALIGN_SIZE)) {
				aad_tmp = (void *)aad;
			} else {
				aad_tmp = align_malloc(aad_tmp_len,
						       DATA_ADDR_ALIGN_SIZE);
				if (!aad_tmp)
					goto exit;

				memcpy(aad_tmp, aad, aad_tmp_len);
			}

			set_aad_len_reg(key_chn, aad_tmp_len);
			set_pc_len_reg(key_chn, tmp_len);
		}

		aad_desc->src_addr = (u32)virt_to_phys(aad_tmp);
		aad_desc->src_len  = aad_tmp_len;

		if (aad_tmp_len) {
			data_desc->user_define = LLI_USER_STRING_START |
						 LLI_USER_STRING_LAST |
						 (key_chn << 4);
			crypto_write((u32)virt_to_phys(aad_desc), CRYPTO_DMA_LLI_ADDR);
			cache_op_inner(DCACHE_AREA_CLEAN, aad_tmp, aad_tmp_len);
			cache_op_inner(DCACHE_AREA_CLEAN, aad_desc, sizeof(*aad_desc));
		}
	}

	cache_op_inner(DCACHE_AREA_CLEAN, data_desc, sizeof(*data_desc));
	cache_op_inner(DCACHE_AREA_CLEAN, dma_in, tmp_len);
	cache_op_inner(DCACHE_AREA_INVALIDATE, dma_out, tmp_len);

	/* din_swap set 1, dout_swap set 1, default 1. */
	crypto_write(0x00030003, CRYPTO_FIFO_CTL);
	crypto_write(0, CRYPTO_DMA_INT_EN);

	reg_ctrl = crypto_read(CRYPTO_BC_CTL) | CRYPTO_BC_ENABLE;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL);
	crypto_write(0x00010001, CRYPTO_DMA_CTL);	/* start */

	mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST);

	/* wait calc ok */
	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask),
			      RK_CRYPTO_TIMEOUT);
	tmp = crypto_read(CRYPTO_DMA_INT_ST);
	crypto_write(tmp, CRYPTO_DMA_INT_ST);

	if ((tmp & mask) == expt_int) {
		if (out && out != dma_out)
			memcpy(out, dma_out, len);

		if (IS_NEED_TAG(rk_mode)) {
			ret = WAIT_TAG_VALID(key_chn, RK_CRYPTO_TIMEOUT);
			get_tag_from_reg(key_chn, tag, AES_BLOCK_SIZE);
		}
	} else {
		dump_crypto_state(data_desc, tmp, expt_int, in, out, len, ret);
		ret = -1;
	}

exit:
	crypto_write(0xffff0000, CRYPTO_BC_CTL);	/* disable bc_ctl */
	align_free(data_desc);
	align_free(aad_desc);
	if (dma_in != in)
		align_free(dma_in);
	if (out && dma_out != out)
		align_free(dma_out);
	if (aad && aad != aad_tmp)
		align_free(aad_tmp);

	return ret;
}

static int hw_aes_init(u32 chn, const u8 *key, const u8 *twk_key, u32 key_len,
		       const u8 *iv, u32 iv_len, u32 mode, bool enc)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);

	if (rk_mode > RK_MODE_XTS)
		return -EINVAL;

	if (iv_len > AES_BLOCK_SIZE)
		return -EINVAL;

	if (IS_NEED_IV(rk_mode)) {
		if (!iv || iv_len != AES_BLOCK_SIZE)
			return -EINVAL;
	} else {
		iv_len = 0;
	}

	if (rk_mode == RK_MODE_XTS) {
		if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_256)
			return -EINVAL;

		if (!key || !twk_key)
			return -EINVAL;
	} else {
		if (key_len != AES_KEYSIZE_128 &&
		    key_len != AES_KEYSIZE_192 &&
		    key_len != AES_KEYSIZE_256)
			return -EINVAL;
	}

	return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len,
			      CRYPTO_AES, mode, enc);
}

static int hw_sm4_init(u32 chn, const u8 *key, const u8 *twk_key, u32 key_len,
		       const u8 *iv, u32 iv_len, u32 mode, bool enc)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);

	if (rk_mode > RK_MODE_XTS)
		return -EINVAL;

	if (iv_len > SM4_BLOCK_SIZE || key_len != SM4_KEYSIZE)
		return -EINVAL;

	if (IS_NEED_IV(rk_mode)) {
		if (!iv || iv_len != SM4_BLOCK_SIZE)
			return -EINVAL;
	} else {
		iv_len = 0;
	}

	if (rk_mode == RK_MODE_XTS) {
		if (!key || !twk_key)
			return -EINVAL;
	}

	return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len,
			      CRYPTO_SM4, mode, enc);
}

int rk_crypto_des(struct udevice *dev, u32 mode, const u8 *key, u32 key_len,
		  const u8 *iv, const u8 *in, u8 *out, u32 len, bool enc)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);
	u8 tmp_key[24];
	int ret;

	if (!is_des_mode(rk_mode))
		return -EINVAL;

	if (key_len == DES_BLOCK_SIZE || key_len == 3 * DES_BLOCK_SIZE) {
		memcpy(tmp_key, key, key_len);
	} else if (key_len == 2 * DES_BLOCK_SIZE) {
		memcpy(tmp_key, key, 16);
		memcpy(tmp_key + 16, key, 8);
		key_len = 3 * DES_BLOCK_SIZE;
	} else {
		return -EINVAL;
	}

	ret = hw_cipher_init(0, tmp_key, NULL, key_len, iv, DES_BLOCK_SIZE,
			     CRYPTO_DES, mode, enc);
	if (ret)
		goto exit;

	ret = hw_cipher_crypt(in, out, len, NULL, 0,
			      NULL, 0, mode);

exit:
	return ret;
}

int rk_crypto_aes(struct udevice *dev, u32 mode,
		  const u8 *key, const u8 *twk_key, u32 key_len,
		  const u8 *iv, u32 iv_len,
		  const u8 *in, u8 *out, u32 len, bool enc)
{
	int ret;

	/* RV1126/RV1109 do not support aes-192 */
#if defined(CONFIG_ROCKCHIP_RV1126)
	if (key_len == AES_KEYSIZE_192)
		return -EINVAL;
#endif

	ret = hw_aes_init(0, key, twk_key, key_len, iv, iv_len, mode, enc);
	if (ret)
		return ret;

	return hw_cipher_crypt(in, out, len, NULL, 0,
			       NULL, 0, mode);
}

int rk_crypto_sm4(struct udevice *dev, u32 mode,
		  const u8 *key, const u8 *twk_key, u32 key_len,
		  const u8 *iv, u32 iv_len,
		  const u8 *in, u8 *out, u32 len, bool enc)
{
	int ret;

	ret = hw_sm4_init(0, key, twk_key, key_len, iv, iv_len, mode, enc);
	if (ret)
		return ret;

	return hw_cipher_crypt(in, out, len, NULL, 0, NULL, 0, mode);
}

int rockchip_crypto_cipher(struct udevice *dev, cipher_context *ctx,
			   const u8 *in, u8 *out, u32 len, bool enc)
{
	switch (ctx->algo) {
	case CRYPTO_DES:
		return rk_crypto_des(dev, ctx->mode, ctx->key, ctx->key_len,
				     ctx->iv, in, out, len, enc);
	case CRYPTO_AES:
		return rk_crypto_aes(dev, ctx->mode,
				     ctx->key, ctx->twk_key, ctx->key_len,
				     ctx->iv, ctx->iv_len, in, out, len, enc);
	case CRYPTO_SM4:
		return rk_crypto_sm4(dev, ctx->mode,
				     ctx->key, ctx->twk_key, ctx->key_len,
				     ctx->iv, ctx->iv_len, in, out, len, enc);
	default:
		return -EINVAL;
	}
}

int rk_crypto_mac(struct udevice *dev, u32 algo, u32 mode,
		  const u8 *key, u32 key_len,
		  const u8 *in, u32 len, u8 *tag)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);
	int ret;

	if (!IS_MAC_MODE(rk_mode))
		return -EINVAL;

	if (algo != CRYPTO_AES && algo != CRYPTO_SM4)
		return -EINVAL;

	/* RV1126/RV1109 do not support aes-192 */
#if defined(CONFIG_ROCKCHIP_RV1126)
	if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192)
		return -EINVAL;
#endif

	ret = hw_cipher_init(g_key_chn, key, NULL, key_len, NULL, 0,
			     algo, mode, true);
	if (ret)
		return ret;

	return hw_cipher_crypt(in, NULL, len, NULL, 0,
			       tag, AES_BLOCK_SIZE, mode);
}

int rockchip_crypto_mac(struct udevice *dev, cipher_context *ctx,
			const u8 *in, u32 len, u8 *tag)
{
	return rk_crypto_mac(dev, ctx->algo, ctx->mode,
			     ctx->key, ctx->key_len, in, len, tag);
}

int rk_crypto_ae(struct udevice *dev, u32 algo, u32 mode,
		 const u8 *key, u32 key_len, const u8 *nonce, u32 nonce_len,
		 const u8 *in, u32 len, const u8 *aad, u32 aad_len,
		 u8 *out, u8 *tag)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);
	int ret;

	if (!IS_AE_MODE(rk_mode))
		return -EINVAL;

	if (len == 0)
		return -EINVAL;

	if (algo != CRYPTO_AES && algo != CRYPTO_SM4)
		return -EINVAL;

	/* RV1126/RV1109 do not support aes-192 */
#if defined(CONFIG_ROCKCHIP_RV1126)
	if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192)
		return -EINVAL;
#endif

	ret = hw_cipher_init(g_key_chn, key, NULL, key_len, nonce, nonce_len,
			     algo, mode, true);
	if (ret)
		return ret;

	return hw_cipher_crypt(in, out, len, aad, aad_len,
			       tag, AES_BLOCK_SIZE, mode);
}

int rockchip_crypto_ae(struct udevice *dev, cipher_context *ctx,
		       const u8 *in, u32 len, const u8 *aad, u32 aad_len,
		       u8 *out, u8 *tag)
{
	return rk_crypto_ae(dev, ctx->algo, ctx->mode, ctx->key, ctx->key_len,
			    ctx->iv, ctx->iv_len, in, len,
			    aad, aad_len, out, tag);
}

#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx,
				      u8 *sign, u8 *output)
{
	struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL;
	struct mpa_num *mpa_c = NULL, *mpa_result = NULL;
	u32 n_bits, n_words;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_RSA512 &&
	    ctx->algo != CRYPTO_RSA1024 &&
	    ctx->algo != CRYPTO_RSA2048 &&
	    ctx->algo != CRYPTO_RSA3072 &&
	    ctx->algo != CRYPTO_RSA4096)
		return -EINVAL;

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	ret = rk_mpa_alloc(&mpa_m, sign, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_e, ctx->e, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_n, ctx->n, n_words);
	if (ret)
		goto exit;

	if (ctx->c) {
		ret = rk_mpa_alloc(&mpa_c, ctx->c, n_words);
		if (ret)
			goto exit;
	}

	ret = rk_mpa_alloc(&mpa_result, NULL, n_words);
	if (ret)
		goto exit;

	ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result);
	if (!ret)
		memcpy(output, mpa_result->d, BITS2BYTE(n_bits));

exit:
	rk_mpa_free(&mpa_m);
	rk_mpa_free(&mpa_e);
	rk_mpa_free(&mpa_n);
	rk_mpa_free(&mpa_c);
	rk_mpa_free(&mpa_result);

	return ret;
}
#endif

static const struct dm_crypto_ops rockchip_crypto_ops = {
	.capability   = rockchip_crypto_capability,
	.sha_init     = rockchip_crypto_sha_init,
	.sha_update   = rockchip_crypto_sha_update,
	.sha_final    = rockchip_crypto_sha_final,
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	.rsa_verify   = rockchip_crypto_rsa_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	.hmac_init    = rockchip_crypto_hmac_init,
	.hmac_update  = rockchip_crypto_hmac_update,
	.hmac_final   = rockchip_crypto_hmac_final,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	.cipher_crypt = rockchip_crypto_cipher,
	.cipher_mac   = rockchip_crypto_mac,
	.cipher_ae    = rockchip_crypto_ae,
#endif
};

/*
 * Only use the "clocks" property to parse the crypto clock ids, via
 * rockchip_get_clk(). We always add a crypto node to the U-Boot dts,
 * so when a kernel dtb is used instead:
 *
 *   1. there is a cru phandle mismatch between U-Boot and the kernel dtb;
 *   2. CONFIG_OF_SPL_REMOVE_PROPS may remove the clock properties.
 */
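/*
 * Illustrative dts fragment consumed below (node name, unit address and
 * clock ids are assumptions for the example, not taken from this file):
 *
 *	crypto@ff2b0000 {
 *		compatible = "rockchip,rv1126-crypto";
 *		reg = <0xff2b0000 0x4000>;
 *		clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>;
 *		clock-frequency = <200000000>, <200000000>;
 *	};
 */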
static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int len, ret = -EINVAL;

	memset(priv, 0x00, sizeof(*priv));

	priv->reg = (fdt_addr_t)dev_read_addr_ptr(dev);
	if (priv->reg == FDT_ADDR_T_NONE)
		return -EINVAL;

	crypto_base = priv->reg;

	/* if there is no "clocks" property in the dts, just skip it */
	if (!dev_read_prop(dev, "clocks", &len)) {
		printf("Can't find \"clocks\" property\n");
		return 0;
	}

	priv->clocks = malloc(len);
	if (!priv->clocks)
		return -ENOMEM;

	priv->nclocks = len / sizeof(u32);
	if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks,
			       priv->nclocks)) {
		printf("Can't read \"clocks\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	if (!dev_read_prop(dev, "clock-frequency", &len)) {
		printf("Can't find \"clock-frequency\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	priv->frequencies = malloc(len);
	if (!priv->frequencies) {
		ret = -ENOMEM;
		goto exit;
	}

	priv->nclocks = len / sizeof(u32);
	if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies,
			       priv->nclocks)) {
		printf("Can't read \"clock-frequency\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	return 0;
exit:
	if (priv->clocks)
		free(priv->clocks);

	if (priv->frequencies)
		free(priv->frequencies);

	return ret;
}

static int rk_crypto_set_clk(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct clk clk;
	int i, ret;

	if (!priv->clocks && priv->nclocks == 0)
		return 0;

	for (i = 0; i < priv->nclocks; i++) {
		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0) {
			printf("Failed to get clk index %d, ret=%d\n", i, ret);
			return ret;
		}
		ret = clk_set_rate(&clk, priv->frequencies[i]);
		if (ret < 0) {
			printf("%s: Failed to set clk(%ld): ret=%d\n",
			       __func__, clk.id, ret);
			return ret;
		}
	}

	return 0;
}

static int rockchip_crypto_probe(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rk_crypto_soc_data *sdata;
	int ret = 0;

	sdata = (struct rk_crypto_soc_data *)dev_get_driver_data(dev);

	if (sdata->dynamic_cap)
		sdata->capability = sdata->dynamic_cap();

	priv->soc_data = sdata;

	priv->hw_ctx = memalign(LLI_ADDR_ALIGN_SIZE,
				sizeof(struct rk_hash_ctx));
	if (!priv->hw_ctx)
		return -ENOMEM;

	ret = rk_crypto_set_clk(dev);
	if (ret)
		return ret;

	hw_crypto_reset();

	return 0;
}

static const struct rk_crypto_soc_data soc_data_base = {
	.capability = CRYPTO_MD5 |
		      CRYPTO_SHA1 |
		      CRYPTO_SHA256 |
		      CRYPTO_SHA512 |
		      CRYPTO_HMAC_MD5 |
		      CRYPTO_HMAC_SHA1 |
		      CRYPTO_HMAC_SHA256 |
		      CRYPTO_HMAC_SHA512 |
		      CRYPTO_RSA512 |
		      CRYPTO_RSA1024 |
		      CRYPTO_RSA2048 |
		      CRYPTO_RSA3072 |
		      CRYPTO_RSA4096 |
		      CRYPTO_DES |
		      CRYPTO_AES,
};

static const struct rk_crypto_soc_data soc_data_base_sm = {
	.capability = CRYPTO_MD5 |
		      CRYPTO_SHA1 |
		      CRYPTO_SHA256 |
		      CRYPTO_SHA512 |
		      CRYPTO_SM3 |
		      CRYPTO_HMAC_MD5 |
		      CRYPTO_HMAC_SHA1 |
		      CRYPTO_HMAC_SHA256 |
		      CRYPTO_HMAC_SHA512 |
		      CRYPTO_HMAC_SM3 |
		      CRYPTO_RSA512 |
		      CRYPTO_RSA1024 |
		      CRYPTO_RSA2048 |
		      CRYPTO_RSA3072 |
		      CRYPTO_RSA4096 |
		      CRYPTO_DES |
		      CRYPTO_AES |
		      CRYPTO_SM4,
};

static const struct rk_crypto_soc_data soc_data_rk1808 = {
	.capability = CRYPTO_MD5 |
		      CRYPTO_SHA1 |
		      CRYPTO_SHA256 |
		      CRYPTO_HMAC_MD5 |
		      CRYPTO_HMAC_SHA1 |
		      CRYPTO_HMAC_SHA256 |
		      CRYPTO_RSA512 |
		      CRYPTO_RSA1024 |
		      CRYPTO_RSA2048 |
		      CRYPTO_RSA3072 |
		      CRYPTO_RSA4096,
};

static const struct rk_crypto_soc_data soc_data_cryptov3 = {
	.capability  = 0,
	.dynamic_cap = crypto_v3_dynamic_cap,
};

static const struct udevice_id rockchip_crypto_ids[] = {
	{
		.compatible = "rockchip,px30-crypto",
		.data = (ulong)&soc_data_base
	},
	{
		.compatible = "rockchip,rk1808-crypto",
		.data = (ulong)&soc_data_rk1808
	},
	{
		.compatible = "rockchip,rk3308-crypto",
		.data = (ulong)&soc_data_base
	},
	{
		.compatible = "rockchip,rv1126-crypto",
		.data = (ulong)&soc_data_base_sm
	},
	{
		.compatible = "rockchip,rk3568-crypto",
		.data = (ulong)&soc_data_base_sm
	},
	{
		.compatible = "rockchip,rk3588-crypto",
		.data = (ulong)&soc_data_base_sm
	},
	{
		.compatible = "rockchip,crypto-v3",
		.data = (ulong)&soc_data_cryptov3
	},
	{
		.compatible = "rockchip,crypto-v4",
		.data = (ulong)&soc_data_cryptov3 /* reuse crypto v3 config */
	},
	{ }
};

U_BOOT_DRIVER(rockchip_crypto_v2) = {
	.name		= "rockchip_crypto_v2",
	.id		= UCLASS_CRYPTO,
	.of_match	= rockchip_crypto_ids,
	.ops		= &rockchip_crypto_ops,
	.probe		= rockchip_crypto_probe,
	.ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv),
};