// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <common.h>
#include <clk.h>
#include <crypto.h>
#include <dm.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <asm/arch/clock.h>
#include <rockchip/crypto_hash_cache.h>
#include <rockchip/crypto_v2.h>
#include <rockchip/crypto_v2_pka.h>

#define	RK_HASH_CTX_MAGIC		0x1A1A1A1A

#ifdef DEBUG
#define IMSG(format, ...) printf("[%s, %05d]-trace: " format "\n", \
				 __func__, __LINE__, ##__VA_ARGS__)
#else
#define IMSG(format, ...)
#endif

struct crypto_lli_desc {
	u32 src_addr;
	u32 src_len;
	u32 dst_addr;
	u32 dst_len;
	u32 user_define;	/* LLI_USER_* flags and key channel select */
	u32 reserve;
	u32 dma_ctrl;		/* LLI_DMA_CTRL_* flags */
	u32 next_addr;		/* physical address of the next descriptor */
};

struct rk_hash_ctx {
	struct crypto_lli_desc		data_lli;	/* lli desc */
	struct crypto_hash_cache	*hash_cache;
	u32				magic;		/* to check ctx */
	u32				algo;		/* hash algo */
	u8				digest_size;	/* hash out length */
	u8				reserved[3];
};

struct rk_crypto_soc_data {
	u32 capability;
};

struct rockchip_crypto_priv {
	fdt_addr_t			reg;
	u32				frequency;
	char				*clocks;
	u32				*frequencies;
	u32				nclocks;
	u32				length;
	struct rk_hash_ctx		*hw_ctx;
	struct rk_crypto_soc_data	*soc_data;
};

#define LLI_ADDR_ALIGN_SIZE	8
#define DATA_ADDR_ALIGN_SIZE	8
#define DATA_LEN_ALIGN_SIZE	64

/*
 * crypto timeout is 500 ms; updates are capped at 32 MiB so a single
 * transfer always finishes within the timeout
 */
#define HASH_UPDATE_LIMIT	(32 * 1024 * 1024)
#define RK_CRYPTO_TIMEOUT	500000

#define RK_POLL_TIMEOUT(condition, timeout) \
({ \
	int time_out = timeout; \
	while (condition) { \
		if (--time_out <= 0) { \
			debug("[%s] %d: time out!\n", __func__,\
				__LINE__); \
			break; \
		} \
		udelay(1); \
	} \
	(time_out <= 0) ? -ETIMEDOUT : 0; \
})
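
/*
 * Usage sketch (illustrative): poll until a condition clears, e.g.
 *
 *	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL), RK_CRYPTO_TIMEOUT);
 *
 * evaluates to -ETIMEDOUT if CRYPTO_RST_CTL is still non-zero after roughly
 * RK_CRYPTO_TIMEOUT microseconds, and to 0 otherwise.
 */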

#define WAIT_TAG_VALID(channel, timeout) ({ \
	u32 tag_mask = CRYPTO_CH0_TAG_VALID << (channel);\
	int ret;\
	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_TAG_VALID) & tag_mask),\
			      timeout);\
	crypto_write(crypto_read(CRYPTO_TAG_VALID) & tag_mask, CRYPTO_TAG_VALID);\
	ret;\
})

#define virt_to_phys(addr)		((unsigned long)(addr) & 0xffffffff)
#define phys_to_virt(addr, area)	((unsigned long)(addr))

#define align_malloc(bytes, alignment)	memalign(alignment, bytes)
#define align_free(addr)		do { if (addr) free(addr); } while (0)

#define ROUNDUP(size, alignment)	round_up(size, alignment)
#define cache_op_inner(type, addr, size) \
					crypto_flush_cacheline((ulong)(addr), size)

#define IS_NEED_IV(rk_mode) ((rk_mode) != RK_MODE_ECB && \
			     (rk_mode) != RK_MODE_CMAC && \
			     (rk_mode) != RK_MODE_CBC_MAC)

#define IS_NEED_TAG(rk_mode) ((rk_mode) == RK_MODE_CMAC || \
			      (rk_mode) == RK_MODE_CBC_MAC || \
			      (rk_mode) == RK_MODE_CCM || \
			      (rk_mode) == RK_MODE_GCM)

#define IS_MAC_MODE(rk_mode) ((rk_mode) == RK_MODE_CMAC || \
			      (rk_mode) == RK_MODE_CBC_MAC)

#define IS_AE_MODE(rk_mode) ((rk_mode) == RK_MODE_CCM || \
			     (rk_mode) == RK_MODE_GCM)

fdt_addr_t crypto_base;

static inline void word2byte_be(u32 word, u8 *ch)
{
	ch[0] = (word >> 24) & 0xff;
	ch[1] = (word >> 16) & 0xff;
	ch[2] = (word >> 8) & 0xff;
	ch[3] = (word >> 0) & 0xff;
}

static inline u32 byte2word_be(const u8 *ch)
{
	return (*ch << 24) + (*(ch + 1) << 16) + (*(ch + 2) << 8) + *(ch + 3);
}
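
/* e.g. byte2word_be((const u8 []){ 0x01, 0x02, 0x03, 0x04 }) == 0x01020304 */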

static inline void clear_regs(u32 base, u32 words)
{
	int i;

	/* clear output registers */
	for (i = 0; i < words; i++)
		crypto_write(0, base + 4 * i);
}

static inline void clear_hash_out_reg(void)
{
	clear_regs(CRYPTO_HASH_DOUT_0, 16);
}

static inline void clear_key_regs(void)
{
	clear_regs(CRYPTO_CH0_KEY_0, CRYPTO_KEY_CHANNEL_NUM * 4);
}

static inline void read_regs(u32 base, u8 *data, u32 data_len)
{
	u8 tmp_buf[4];
	u32 i;

	for (i = 0; i < data_len / 4; i++)
		word2byte_be(crypto_read(base + i * 4),
			     data + i * 4);

	if (data_len % 4) {
		word2byte_be(crypto_read(base + i * 4), tmp_buf);
		memcpy(data + i * 4, tmp_buf, data_len % 4);
	}
}

static inline void write_regs(u32 base, const u8 *data, u32 data_len)
{
	u8 tmp_buf[4];
	u32 i;

	for (i = 0; i < data_len / 4; i++, base += 4)
		crypto_write(byte2word_be(data + i * 4), base);

	if (data_len % 4) {
		memset(tmp_buf, 0x00, sizeof(tmp_buf));
		memcpy((u8 *)tmp_buf, data + i * 4, data_len % 4);
		crypto_write(byte2word_be(tmp_buf), base);
	}
}

static inline void write_key_reg(u32 chn, const u8 *key, u32 key_len)
{
	write_regs(CRYPTO_CH0_KEY_0 + chn * 0x10, key, key_len);
}

static inline void set_iv_reg(u32 chn, const u8 *iv, u32 iv_len)
{
	u32 base_iv;

	base_iv = CRYPTO_CH0_IV_0 + chn * 0x10;

	/* clear iv */
	clear_regs(base_iv, 4);

	if (!iv || iv_len == 0)
		return;

	write_regs(base_iv, iv, iv_len);

	crypto_write(iv_len, CRYPTO_CH0_IV_LEN_0 + 4 * chn);
}

static inline void get_iv_reg(u32 chn, u8 *iv, u32 iv_len)
{
	u32 base_iv;

	base_iv = CRYPTO_CH0_IV_0 + chn * 0x10;

	read_regs(base_iv, iv, iv_len);
}

static inline void get_tag_from_reg(u32 chn, u8 *tag, u32 tag_len)
{
	u32 i;
	u32 chn_base = CRYPTO_CH0_TAG_0 + 0x10 * chn;

	for (i = 0; i < tag_len / 4; i++, chn_base += 4)
		word2byte_be(crypto_read(chn_base), tag + 4 * i);
}

static int hw_crypto_reset(void)
{
	u32 val = 0, mask = 0;
	int ret;

	val = CRYPTO_SW_PKA_RESET | CRYPTO_SW_CC_RESET;
	mask = val << CRYPTO_WRITE_MASK_SHIFT;

	/* reset the pka and crypto modules */
	crypto_write(val | mask, CRYPTO_RST_CTL);

	/* wait for the reset to complete */
	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL), RK_CRYPTO_TIMEOUT);

	return ret;
}

static void hw_hash_clean_ctx(struct rk_hash_ctx *ctx)
{
	/* clear hash status */
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

	assert(ctx);
	assert(ctx->magic == RK_HASH_CTX_MAGIC);

	crypto_hash_cache_free(ctx->hash_cache);

	memset(ctx, 0x00, sizeof(*ctx));
}

static int rk_hash_init(void *hw_ctx, u32 algo)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)hw_ctx;
	u32 reg_ctrl = 0;
	int ret;

	if (!tmp_ctx)
		return -EINVAL;

	reg_ctrl = CRYPTO_SW_CC_RESET;
	crypto_write(reg_ctrl | (reg_ctrl << CRYPTO_WRITE_MASK_SHIFT),
		     CRYPTO_RST_CTL);

	/* wait for the reset to complete */
	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL),
			      RK_CRYPTO_TIMEOUT);

	reg_ctrl = 0;
	tmp_ctx->algo = algo;
	switch (algo) {
	case CRYPTO_MD5:
	case CRYPTO_HMAC_MD5:
		reg_ctrl |= CRYPTO_MODE_MD5;
		tmp_ctx->digest_size = 16;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_HMAC_SHA1:
		reg_ctrl |= CRYPTO_MODE_SHA1;
		tmp_ctx->digest_size = 20;
		break;
	case CRYPTO_SHA256:
	case CRYPTO_HMAC_SHA256:
		reg_ctrl |= CRYPTO_MODE_SHA256;
		tmp_ctx->digest_size = 32;
		break;
	case CRYPTO_SHA512:
	case CRYPTO_HMAC_SHA512:
		reg_ctrl |= CRYPTO_MODE_SHA512;
		tmp_ctx->digest_size = 64;
		break;
	case CRYPTO_SM3:
	case CRYPTO_HMAC_SM3:
		reg_ctrl |= CRYPTO_MODE_SM3;
		tmp_ctx->digest_size = 32;
		break;
	default:
		ret = -EINVAL;
		goto exit;
	}

	clear_hash_out_reg();

	/* enable hardware padding */
	reg_ctrl |= CRYPTO_HW_PAD_ENABLE;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL);

	/* FIFO input and output data byte swap */
	/* such as B0, B1, B2, B3 -> B3, B2, B1, B0 */
	reg_ctrl = CRYPTO_DOUT_BYTESWAP | CRYPTO_DOIN_BYTESWAP;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_FIFO_CTL);

	/* mask all DMA interrupts; completion is polled via CRYPTO_DMA_INT_ST */
	crypto_write(0, CRYPTO_DMA_INT_EN);

	tmp_ctx->magic = RK_HASH_CTX_MAGIC;

	return 0;
exit:
	/* clear hash setting if init failed */
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

	return ret;
}

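/*
 * Feed one chunk of data to the hash engine through a single link-list (LLI)
 * descriptor. The first chunk programs the descriptor address and starts the
 * DMA; subsequent chunks restart it. The final chunk sets the STRING_LAST /
 * DMA_LAST flags so the hardware applies padding and finalizes the digest.
 */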
static int rk_hash_direct_calc(void *hw_data, const u8 *data,
			       u32 data_len, u8 *started_flag, u8 is_last)
{
	struct rockchip_crypto_priv *priv = hw_data;
	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;
	struct crypto_lli_desc *lli = &hash_ctx->data_lli;
	int ret = -EINVAL;
	u32 tmp = 0, mask = 0;

	assert(IS_ALIGNED((ulong)data, DATA_ADDR_ALIGN_SIZE));
	assert(is_last || IS_ALIGNED(data_len, DATA_LEN_ALIGN_SIZE));

	debug("%s: data = %p, len = %u, s = %x, l = %x\n",
	      __func__, data, data_len, *started_flag, is_last);

	memset(lli, 0x00, sizeof(*lli));
	lli->src_addr = (u32)virt_to_phys(data);
	lli->src_len = data_len;
	lli->dma_ctrl = LLI_DMA_CTRL_SRC_DONE;

	if (is_last) {
		lli->user_define |= LLI_USER_STRING_LAST;
		lli->dma_ctrl |= LLI_DMA_CTRL_LAST;
	} else {
		lli->next_addr = (u32)virt_to_phys(lli);
		lli->dma_ctrl |= LLI_DMA_CTRL_PAUSE;
	}

	if (!(*started_flag)) {
		lli->user_define |=
			(LLI_USER_STRING_START | LLI_USER_CPIHER_START);
		crypto_write((u32)virt_to_phys(lli), CRYPTO_DMA_LLI_ADDR);
		crypto_write((CRYPTO_HASH_ENABLE << CRYPTO_WRITE_MASK_SHIFT) |
			     CRYPTO_HASH_ENABLE, CRYPTO_HASH_CTL);
		tmp = CRYPTO_DMA_START;
		*started_flag = 1;
	} else {
		tmp = CRYPTO_DMA_RESTART;
	}

	/* flush cache */
	crypto_flush_cacheline((ulong)lli, sizeof(*lli));
	crypto_flush_cacheline((ulong)data, data_len);

	/* start the calculation */
	crypto_write(tmp << CRYPTO_WRITE_MASK_SHIFT | tmp,
		     CRYPTO_DMA_CTL);

	/* ignore the CRYPTO_SYNC_LOCKSTEP_INT_ST flag while polling */
	mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST);

	/* wait for the calculation to finish */
	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask),
			      RK_CRYPTO_TIMEOUT);

	/* clear interrupt status */
	tmp = crypto_read(CRYPTO_DMA_INT_ST);
	crypto_write(tmp, CRYPTO_DMA_INT_ST);

	if (tmp != CRYPTO_SRC_ITEM_DONE_INT_ST &&
	    tmp != CRYPTO_ZERO_LEN_INT_ST) {
		debug("[%s] %d: CRYPTO_DMA_INT_ST = 0x%x\n",
		      __func__, __LINE__, tmp);
		goto exit;
	}

	priv->length += data_len;
exit:
	return ret;
}

int rk_hash_update(void *ctx, const u8 *data, u32 data_len)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
	int ret = -EINVAL;

	debug("\n");
	if (!tmp_ctx || !data)
		goto exit;

	if (tmp_ctx->digest_size == 0 || tmp_ctx->magic != RK_HASH_CTX_MAGIC)
		goto exit;

	ret = crypto_hash_update_with_cache(tmp_ctx->hash_cache,
					    data, data_len);

exit:
	/* on failure, release the hash cache and reset the context */
	if (ret && tmp_ctx)
		hw_hash_clean_ctx(tmp_ctx);

	return ret;
}

int rk_hash_final(void *ctx, u8 *digest, size_t len)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
	int ret = -EINVAL;

	if (!digest)
		goto exit;

	if (!tmp_ctx ||
	    tmp_ctx->digest_size == 0 ||
	    len > tmp_ctx->digest_size ||
	    tmp_ctx->magic != RK_HASH_CTX_MAGIC) {
		goto exit;
	}

	/* wait for the hash value to become valid */
	ret = RK_POLL_TIMEOUT(!crypto_read(CRYPTO_HASH_VALID),
			      RK_CRYPTO_TIMEOUT);

	read_regs(CRYPTO_HASH_DOUT_0, digest, len);

	/* clear hash status */
	crypto_write(CRYPTO_HASH_IS_VALID, CRYPTO_HASH_VALID);
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

exit:
	return ret;
}

static u32 rockchip_crypto_capability(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 capability, mask = 0;

	capability = priv->soc_data->capability;

#if !(CONFIG_IS_ENABLED(ROCKCHIP_CIPHER))
	mask |= (CRYPTO_DES | CRYPTO_AES | CRYPTO_SM4);
#endif

#if !(CONFIG_IS_ENABLED(ROCKCHIP_HMAC))
	mask |= (CRYPTO_HMAC_MD5 | CRYPTO_HMAC_SHA1 | CRYPTO_HMAC_SHA256 |
		 CRYPTO_HMAC_SHA512 | CRYPTO_HMAC_SM3);
#endif

#if !(CONFIG_IS_ENABLED(ROCKCHIP_RSA))
	mask |= (CRYPTO_RSA512 | CRYPTO_RSA1024 | CRYPTO_RSA2048 |
		 CRYPTO_RSA3072 | CRYPTO_RSA4096);
#endif

	return capability & (~mask);
}

static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;

	if (!ctx)
		return -EINVAL;

	memset(hash_ctx, 0x00, sizeof(*hash_ctx));

	priv->length = 0;

	hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
						       priv, ctx->length,
						       DATA_ADDR_ALIGN_SIZE,
						       DATA_LEN_ALIGN_SIZE);
	if (!hash_ctx->hash_cache)
		return -EFAULT;

	return rk_hash_init(hash_ctx, ctx->algo);
}

static int rockchip_crypto_sha_update(struct udevice *dev,
				      u32 *input, u32 len)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int ret, i;
	u8 *p;

	if (!len)
		return -EINVAL;

	p = (u8 *)input;

	for (i = 0; i < len / HASH_UPDATE_LIMIT; i++, p += HASH_UPDATE_LIMIT) {
		ret = rk_hash_update(priv->hw_ctx, p, HASH_UPDATE_LIMIT);
		if (ret)
			goto exit;
	}

	if (len % HASH_UPDATE_LIMIT)
		ret = rk_hash_update(priv->hw_ctx, p, len % HASH_UPDATE_LIMIT);

exit:
	return ret;
}

static int rockchip_crypto_sha_final(struct udevice *dev,
				     sha_context *ctx, u8 *output)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 nbits;
	int ret;

	nbits = crypto_algo_nbits(ctx->algo);

	if (priv->length != ctx->length) {
		printf("total length(0x%08x) != init length(0x%08x)!\n",
		       priv->length, ctx->length);
		ret = -EIO;
		goto exit;
	}

	ret = rk_hash_final(priv->hw_ctx, (u8 *)output, BITS2BYTE(nbits));

exit:
	hw_hash_clean_ctx(priv->hw_ctx);
	return ret;
}

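/*
 * Minimal usage sketch (illustrative; assumes a sha_context populated as the
 * callbacks above expect, i.e. with .algo set and the total .length known up
 * front):
 *
 *	sha_context ctx = { .algo = CRYPTO_SHA256, .length = total_len };
 *	u8 digest[32];
 *
 *	rockchip_crypto_sha_init(dev, &ctx);
 *	rockchip_crypto_sha_update(dev, (u32 *)buf, total_len);
 *	rockchip_crypto_sha_final(dev, &ctx, digest);
 *
 * In practice these are reached through the crypto uclass ops registered at
 * the bottom of this file rather than called directly.
 */
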
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
int rk_hmac_init(void *hw_ctx, u32 algo, u8 *key, u32 key_len)
{
	u32 reg_ctrl = 0;
	int ret;

	if (!key || !key_len || key_len > 64)
		return -EINVAL;

	clear_key_regs();

	write_key_reg(0, key, key_len);

	ret = rk_hash_init(hw_ctx, algo);
	if (ret)
		return ret;

	reg_ctrl = crypto_read(CRYPTO_HASH_CTL) | CRYPTO_HMAC_ENABLE;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL);

	return ret;
}

static int rockchip_crypto_hmac_init(struct udevice *dev,
				     sha_context *ctx, u8 *key, u32 key_len)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;

	if (!ctx)
		return -EINVAL;

	memset(hash_ctx, 0x00, sizeof(*hash_ctx));

	priv->length = 0;

	hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
						       priv, ctx->length,
						       DATA_ADDR_ALIGN_SIZE,
						       DATA_LEN_ALIGN_SIZE);
	if (!hash_ctx->hash_cache)
		return -EFAULT;

	return rk_hmac_init(priv->hw_ctx, ctx->algo, key, key_len);
}

static int rockchip_crypto_hmac_update(struct udevice *dev,
				       u32 *input, u32 len)
{
	return rockchip_crypto_sha_update(dev, input, len);
}

static int rockchip_crypto_hmac_final(struct udevice *dev,
				      sha_context *ctx, u8 *output)
{
	return rockchip_crypto_sha_final(dev, ctx, output);
}

#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
static u8 g_key_chn;

static const u32 rk_mode2bc_mode[RK_MODE_MAX] = {
	[RK_MODE_ECB] = CRYPTO_BC_ECB,
	[RK_MODE_CBC] = CRYPTO_BC_CBC,
	[RK_MODE_CTS] = CRYPTO_BC_CTS,
	[RK_MODE_CTR] = CRYPTO_BC_CTR,
	[RK_MODE_CFB] = CRYPTO_BC_CFB,
	[RK_MODE_OFB] = CRYPTO_BC_OFB,
	[RK_MODE_XTS] = CRYPTO_BC_XTS,
	[RK_MODE_CCM] = CRYPTO_BC_CCM,
	[RK_MODE_GCM] = CRYPTO_BC_GCM,
	[RK_MODE_CMAC] = CRYPTO_BC_CMAC,
	[RK_MODE_CBC_MAC] = CRYPTO_BC_CBC_MAC,
};

static inline void set_pc_len_reg(u32 chn, u64 pc_len)
{
	u32 chn_base = CRYPTO_CH0_PC_LEN_0 + chn * 0x08;

	crypto_write(pc_len & 0xffffffff, chn_base);
	crypto_write(pc_len >> 32, chn_base + 4);
}

static inline void set_aad_len_reg(u32 chn, u64 aad_len)
{
	u32 chn_base = CRYPTO_CH0_AAD_LEN_0 + chn * 0x08;

	crypto_write(aad_len & 0xffffffff, chn_base);
	crypto_write(aad_len >> 32, chn_base + 4);
}

static inline bool is_des_mode(u32 rk_mode)
{
	return (rk_mode == RK_MODE_ECB ||
		rk_mode == RK_MODE_CBC ||
		rk_mode == RK_MODE_CFB ||
		rk_mode == RK_MODE_OFB);
}

static void dump_crypto_state(struct crypto_lli_desc *desc,
			      u32 tmp, u32 expt_int,
			      const u8 *in, const u8 *out,
			      u32 len, int ret)
{
	IMSG("%s\n", ret == -ETIMEDOUT ? "timeout" : "mismatch");

	IMSG("CRYPTO_DMA_INT_ST = %08x, expect_int = %08x\n",
	     tmp, expt_int);
	IMSG("data desc		= %p\n", desc);
	IMSG("\taddr_in		= [%08x <=> %08x]\n",
	     desc->src_addr, (u32)virt_to_phys(in));
	IMSG("\taddr_out	= [%08x <=> %08x]\n",
	     desc->dst_addr, (u32)virt_to_phys(out));
	IMSG("\tsrc_len		= [%08x <=> %08x]\n",
	     desc->src_len, (u32)len);
	IMSG("\tdst_len		= %08x\n", desc->dst_len);
	IMSG("\tdma_ctl		= %08x\n", desc->dma_ctrl);
	IMSG("\tuser_define	= %08x\n", desc->user_define);

	IMSG("\n\nDMA CRYPTO_DMA_LLI_ADDR status = %08x\n",
	     crypto_read(CRYPTO_DMA_LLI_ADDR));
	IMSG("DMA CRYPTO_DMA_ST status = %08x\n",
	     crypto_read(CRYPTO_DMA_ST));
	IMSG("DMA CRYPTO_DMA_STATE status = %08x\n",
	     crypto_read(CRYPTO_DMA_STATE));
	IMSG("DMA CRYPTO_DMA_LLI_RADDR status = %08x\n",
	     crypto_read(CRYPTO_DMA_LLI_RADDR));
	IMSG("DMA CRYPTO_DMA_SRC_RADDR status = %08x\n",
	     crypto_read(CRYPTO_DMA_SRC_RADDR));
	IMSG("DMA CRYPTO_DMA_DST_RADDR status = %08x\n",
	     crypto_read(CRYPTO_DMA_DST_RADDR));
	IMSG("DMA CRYPTO_CIPHER_ST status = %08x\n",
	     crypto_read(CRYPTO_CIPHER_ST));
	IMSG("DMA CRYPTO_CIPHER_STATE status = %08x\n",
	     crypto_read(CRYPTO_CIPHER_STATE));
	IMSG("DMA CRYPTO_TAG_VALID status = %08x\n",
	     crypto_read(CRYPTO_TAG_VALID));
	IMSG("LOCKSTEP status = %08x\n\n",
	     crypto_read(0x618));

	IMSG("dst %d bytes not transferred\n",
	     desc->dst_addr + desc->dst_len -
	     crypto_read(CRYPTO_DMA_DST_RADDR));
}

static int ccm128_set_iv_reg(u32 chn, const u8 *nonce, u32 nlen)
{
	u8 iv_buf[AES_BLOCK_SIZE];
	u32 L;

	memset(iv_buf, 0x00, sizeof(iv_buf));

	L = 15 - nlen;
	iv_buf[0] = ((u8)(L - 1) & 7);

	/* the L parameter */
	L = iv_buf[0] & 7;

	/* nonce is too short */
	if (nlen < (14 - L))
		return -EINVAL;

	/* clear aad flag */
	iv_buf[0] &= ~0x40;
	memcpy(&iv_buf[1], nonce, 14 - L);

	set_iv_reg(chn, iv_buf, AES_BLOCK_SIZE);

	return 0;
}

static void ccm_aad_padding(u32 aad_len, u8 *padding, u32 *padding_size)
{
	u32 i;

	i = aad_len < (0x10000 - 0x100) ? 2 : 6;

	if (i == 2) {
		padding[0] = (u8)(aad_len >> 8);
		padding[1] = (u8)aad_len;
	} else {
		padding[0] = 0xFF;
		padding[1] = 0xFE;
		padding[2] = (u8)(aad_len >> 24);
		padding[3] = (u8)(aad_len >> 16);
		padding[4] = (u8)(aad_len >> 8);
		padding[5] = (u8)aad_len;
	}

	*padding_size = i;
}

static int ccm_compose_aad_iv(u8 *aad_iv, u32 data_len, u32 tag_size)
{
	aad_iv[0] |= ((u8)(((tag_size - 2) / 2) & 7) << 3);

	aad_iv[12] = (u8)(data_len >> 24);
	aad_iv[13] = (u8)(data_len >> 16);
	aad_iv[14] = (u8)(data_len >> 8);
	aad_iv[15] = (u8)data_len;

	/* set aad flag */
	aad_iv[0] |= 0x40;

	return 0;
}

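/*
 * Note: the two helpers above build the CCM B0 block per RFC 3610, whose
 * flags octet is (Adata << 6) | (((M - 2) / 2) << 3) | (L - 1). For example,
 * a 13-byte nonce (L = 2) and a 16-byte tag give 0x40 | (7 << 3) | 1 = 0x79
 * once the aad flag is set.
 */
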
static int hw_cipher_init(u32 chn, const u8 *key, const u8 *twk_key,
			  u32 key_len, const u8 *iv, u32 iv_len,
			  u32 algo, u32 mode, bool enc)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);
	u32 key_chn_sel = chn;
	u32 reg_ctrl = 0;

	IMSG("%s: key addr is %p, key_len is %d, iv addr is %p",
	     __func__, key, key_len, iv);
	if (rk_mode >= RK_MODE_MAX)
		return -EINVAL;

	switch (algo) {
	case CRYPTO_DES:
		if (key_len > DES_BLOCK_SIZE)
			reg_ctrl |= CRYPTO_BC_TDES;
		else
			reg_ctrl |= CRYPTO_BC_DES;
		break;
	case CRYPTO_AES:
		reg_ctrl |= CRYPTO_BC_AES;
		break;
	case CRYPTO_SM4:
		reg_ctrl |= CRYPTO_BC_SM4;
		break;
	default:
		return -EINVAL;
	}

	if (algo == CRYPTO_AES || algo == CRYPTO_SM4) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			reg_ctrl |= CRYPTO_BC_128_bit_key;
			break;
		case AES_KEYSIZE_192:
			reg_ctrl |= CRYPTO_BC_192_bit_key;
			break;
		case AES_KEYSIZE_256:
			reg_ctrl |= CRYPTO_BC_256_bit_key;
			break;
		default:
			return -EINVAL;
		}
	}

	reg_ctrl |= rk_mode2bc_mode[rk_mode];
	if (!enc)
		reg_ctrl |= CRYPTO_BC_DECRYPT;

	/* write key data to reg */
	write_key_reg(key_chn_sel, key, key_len);

	/* write the tweak key for XTS mode */
	if (rk_mode == RK_MODE_XTS)
		write_key_reg(key_chn_sel + 4, twk_key, key_len);

	/* set iv reg */
	if (rk_mode == RK_MODE_CCM)
		ccm128_set_iv_reg(chn, iv, iv_len);
	else
		set_iv_reg(chn, iv, iv_len);

	/* set din_swap and dout_swap to 1 (the default) */
	crypto_write(0x00030003, CRYPTO_FIFO_CTL);
	/* polled mode: mask all DMA interrupts */
	crypto_write(0, CRYPTO_DMA_INT_EN);

	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL);

	return 0;
}

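/*
 * Run one DMA cipher operation over a contiguous buffer. Unaligned or
 * CTR-padded buffers are bounced through aligned copies. For CCM/GCM an
 * extra AAD descriptor is chained in front of the data descriptor; for
 * CMAC/CBC-MAC no destination is used and the result is read back from the
 * tag registers once CRYPTO_TAG_VALID asserts.
 */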
static int hw_cipher_crypt(const u8 *in, u8 *out, u64 len,
			   const u8 *aad, u32 aad_len,
			   u8 *tag, u32 tag_len, u32 mode)
{
	struct crypto_lli_desc *data_desc = NULL, *aad_desc = NULL;
	u8 *dma_in = NULL, *dma_out = NULL, *aad_tmp = NULL;
	u32 rk_mode = RK_GET_RK_MODE(mode);
	u32 reg_ctrl = 0, tmp_len = 0;
	u32 expt_int = 0, mask = 0;
	u32 key_chn = g_key_chn;
	u32 tmp, dst_len = 0;
	int ret = -1;

	if (rk_mode == RK_MODE_CTS && len <= AES_BLOCK_SIZE) {
		printf("CTS mode requires more than 16 bytes, got %u\n",
		       (u32)len);
		return -EINVAL;
	}

	tmp_len = (rk_mode == RK_MODE_CTR) ? ROUNDUP(len, AES_BLOCK_SIZE) : len;

	data_desc = align_malloc(sizeof(*data_desc), LLI_ADDR_ALIGN_SIZE);
	if (!data_desc)
		goto exit;

	if (IS_ALIGNED((ulong)in, DATA_ADDR_ALIGN_SIZE) && tmp_len == len)
		dma_in = (void *)in;
	else
		dma_in = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE);
	if (!dma_in)
		goto exit;

	if (out) {
		if (IS_ALIGNED((ulong)out, DATA_ADDR_ALIGN_SIZE) &&
		    tmp_len == len)
			dma_out = out;
		else
			dma_out = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE);
		if (!dma_out)
			goto exit;
		dst_len = tmp_len;
	}

	memset(data_desc, 0x00, sizeof(*data_desc));
	if (dma_in != in)
		memcpy(dma_in, in, len);

	data_desc->src_addr    = (u32)virt_to_phys(dma_in);
	data_desc->src_len     = tmp_len;
	data_desc->dst_addr    = (u32)virt_to_phys(dma_out);
	data_desc->dst_len     = dst_len;
	data_desc->dma_ctrl    = LLI_DMA_CTRL_LAST;

	if (IS_MAC_MODE(rk_mode)) {
		expt_int = CRYPTO_LIST_DONE_INT_ST;
		data_desc->dma_ctrl |= LLI_DMA_CTRL_LIST_DONE;
	} else {
		expt_int = CRYPTO_DST_ITEM_DONE_INT_ST;
		data_desc->dma_ctrl |= LLI_DMA_CTRL_DST_DONE;
	}

	if (rk_mode == RK_MODE_CCM || rk_mode == RK_MODE_GCM) {
		u32 aad_tmp_len = 0;

		data_desc->user_define = LLI_USER_STRING_START |
					 LLI_USER_STRING_LAST |
					 (key_chn << 4);

		aad_desc = align_malloc(sizeof(*aad_desc), LLI_ADDR_ALIGN_SIZE);
		if (!aad_desc)
			goto exit;

		memset(aad_desc, 0x00, sizeof(*aad_desc));
		aad_desc->next_addr = (u32)virt_to_phys(data_desc);
		aad_desc->user_define = LLI_USER_CPIHER_START |
					LLI_USER_STRING_START |
					LLI_USER_STRING_LAST |
					LLI_USER_STRING_AAD |
					(key_chn << 4);

		if (rk_mode == RK_MODE_CCM) {
			u8 padding[AES_BLOCK_SIZE];
			u32 padding_size = 0;

			memset(padding, 0x00, sizeof(padding));
			ccm_aad_padding(aad_len, padding, &padding_size);

			aad_tmp_len = aad_len + AES_BLOCK_SIZE + padding_size;
			aad_tmp_len = ROUNDUP(aad_tmp_len, AES_BLOCK_SIZE);
			aad_tmp = align_malloc(aad_tmp_len,
					       DATA_ADDR_ALIGN_SIZE);
			if (!aad_tmp)
				goto exit;

			/* read iv data from reg */
			get_iv_reg(key_chn, aad_tmp, AES_BLOCK_SIZE);
			ccm_compose_aad_iv(aad_tmp, tmp_len, tag_len);
			memcpy(aad_tmp + AES_BLOCK_SIZE, padding, padding_size);
			memset(aad_tmp + aad_tmp_len - AES_BLOCK_SIZE,
			       0x00, AES_BLOCK_SIZE);
			memcpy(aad_tmp + AES_BLOCK_SIZE + padding_size,
			       aad, aad_len);
		} else {
			aad_tmp_len = aad_len;
			if (IS_ALIGNED((ulong)aad, DATA_ADDR_ALIGN_SIZE)) {
				aad_tmp = (void *)aad;
			} else {
				aad_tmp = align_malloc(aad_tmp_len,
						       DATA_ADDR_ALIGN_SIZE);
				if (!aad_tmp)
					goto exit;

				memcpy(aad_tmp, aad, aad_tmp_len);
			}

			set_aad_len_reg(key_chn, aad_tmp_len);
			set_pc_len_reg(key_chn, tmp_len);
		}

		aad_desc->src_addr = (u32)virt_to_phys(aad_tmp);
		aad_desc->src_len  = aad_tmp_len;
		crypto_write((u32)virt_to_phys(aad_desc), CRYPTO_DMA_LLI_ADDR);
		cache_op_inner(DCACHE_AREA_CLEAN, aad_tmp, aad_tmp_len);
		cache_op_inner(DCACHE_AREA_CLEAN, aad_desc, sizeof(*aad_desc));
	} else {
		data_desc->user_define = LLI_USER_CPIHER_START |
					 LLI_USER_STRING_START |
					 LLI_USER_STRING_LAST |
					 (key_chn << 4);
		crypto_write((u32)virt_to_phys(data_desc), CRYPTO_DMA_LLI_ADDR);
	}

	cache_op_inner(DCACHE_AREA_CLEAN, data_desc, sizeof(*data_desc));
	cache_op_inner(DCACHE_AREA_CLEAN, dma_in, tmp_len);
	if (dma_out)
		cache_op_inner(DCACHE_AREA_INVALIDATE, dma_out, tmp_len);

	/* set din_swap and dout_swap to 1 (the default) */
	crypto_write(0x00030003, CRYPTO_FIFO_CTL);
	crypto_write(0, CRYPTO_DMA_INT_EN);

	reg_ctrl = crypto_read(CRYPTO_BC_CTL) | CRYPTO_BC_ENABLE;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL);
	crypto_write(0x00010001, CRYPTO_DMA_CTL);	/* start */

	mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST);

	/* wait for the calculation to finish */
	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask),
			      RK_CRYPTO_TIMEOUT);
	tmp = crypto_read(CRYPTO_DMA_INT_ST);
	crypto_write(tmp, CRYPTO_DMA_INT_ST);

	if ((tmp & mask) == expt_int) {
		if (out && out != dma_out)
			memcpy(out, dma_out, len);

		if (IS_NEED_TAG(rk_mode)) {
			ret = WAIT_TAG_VALID(key_chn, RK_CRYPTO_TIMEOUT);
			get_tag_from_reg(key_chn, tag, AES_BLOCK_SIZE);
		}
	} else {
		dump_crypto_state(data_desc, tmp, expt_int, in, out, len, ret);
		ret = -1;
	}

exit:
	crypto_write(0xffff0000, CRYPTO_BC_CTL);	/* disable the BC unit */
	align_free(data_desc);
	align_free(aad_desc);
	if (dma_in != in)
		align_free(dma_in);
	if (out && dma_out != out)
		align_free(dma_out);
	if (aad && aad != aad_tmp)
		align_free(aad_tmp);

	return ret;
}

static int hw_aes_init(u32 chn, const u8 *key, const u8 *twk_key, u32 key_len,
		       const u8 *iv, u32 iv_len, u32 mode, bool enc)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);

	if (rk_mode > RK_MODE_XTS)
		return -EINVAL;

	if (iv_len > AES_BLOCK_SIZE)
		return -EINVAL;

	if (IS_NEED_IV(rk_mode)) {
		if (!iv || iv_len != AES_BLOCK_SIZE)
			return -EINVAL;
	} else {
		iv_len = 0;
	}

	if (rk_mode == RK_MODE_XTS) {
		if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_256)
			return -EINVAL;

		if (!key || !twk_key)
			return -EINVAL;
	} else {
		if (key_len != AES_KEYSIZE_128 &&
		    key_len != AES_KEYSIZE_192 &&
		    key_len != AES_KEYSIZE_256)
			return -EINVAL;
	}

	return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len,
			      CRYPTO_AES, mode, enc);
}

static int hw_sm4_init(u32 chn, const u8 *key, const u8 *twk_key, u32 key_len,
		       const u8 *iv, u32 iv_len, u32 mode, bool enc)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);

	if (rk_mode > RK_MODE_XTS)
		return -EINVAL;

	if (iv_len > SM4_BLOCK_SIZE || key_len != SM4_KEYSIZE)
		return -EINVAL;

	if (IS_NEED_IV(rk_mode)) {
		if (!iv || iv_len != SM4_BLOCK_SIZE)
			return -EINVAL;
	} else {
		iv_len = 0;
	}

	if (rk_mode == RK_MODE_XTS) {
		if (!key || !twk_key)
			return -EINVAL;
	}

	return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len,
			      CRYPTO_SM4, mode, enc);
}

int rk_crypto_des(struct udevice *dev, u32 mode, const u8 *key, u32 key_len,
		  const u8 *iv, const u8 *in, u8 *out, u32 len, bool enc)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);
	u8 tmp_key[24];
	int ret;

	if (!is_des_mode(rk_mode))
		return -EINVAL;

	if (key_len == DES_BLOCK_SIZE || key_len == 3 * DES_BLOCK_SIZE) {
		memcpy(tmp_key, key, key_len);
	} else if (key_len == 2 * DES_BLOCK_SIZE) {
		/* expand a 2-key EDE key to K1-K2-K1 */
		memcpy(tmp_key, key, 16);
		memcpy(tmp_key + 16, key, 8);
		key_len = 3 * DES_BLOCK_SIZE;
	} else {
		return -EINVAL;
	}

	ret = hw_cipher_init(0, tmp_key, NULL, key_len, iv, DES_BLOCK_SIZE,
			     CRYPTO_DES, mode, enc);
	if (ret)
		goto exit;

	ret = hw_cipher_crypt(in, out, len, NULL, 0,
			      NULL, 0, mode);

exit:
	return ret;
}

int rk_crypto_aes(struct udevice *dev, u32 mode,
		  const u8 *key, const u8 *twk_key, u32 key_len,
		  const u8 *iv, u32 iv_len,
		  const u8 *in, u8 *out, u32 len, bool enc)
{
	int ret;

	/* RV1126/RV1109 do not support aes-192 */
#if defined(CONFIG_ROCKCHIP_RV1126)
	if (key_len == AES_KEYSIZE_192)
		return -EINVAL;
#endif

	ret = hw_aes_init(0, key, twk_key, key_len, iv, iv_len, mode, enc);
	if (ret)
		return ret;

	return hw_cipher_crypt(in, out, len, NULL, 0,
			       NULL, 0, mode);
}

int rk_crypto_sm4(struct udevice *dev, u32 mode,
		  const u8 *key, const u8 *twk_key, u32 key_len,
		  const u8 *iv, u32 iv_len,
		  const u8 *in, u8 *out, u32 len, bool enc)
{
	int ret;

	ret = hw_sm4_init(0, key, twk_key, key_len, iv, iv_len, mode, enc);
	if (ret)
		return ret;

	return hw_cipher_crypt(in, out, len, NULL, 0, NULL, 0, mode);
}

int rockchip_crypto_cipher(struct udevice *dev, cipher_context *ctx,
			   const u8 *in, u8 *out, u32 len, bool enc)
{
	switch (ctx->algo) {
	case CRYPTO_DES:
		return rk_crypto_des(dev, ctx->mode, ctx->key, ctx->key_len,
				     ctx->iv, in, out, len, enc);
	case CRYPTO_AES:
		return rk_crypto_aes(dev, ctx->mode,
				     ctx->key, ctx->twk_key, ctx->key_len,
				     ctx->iv, ctx->iv_len, in, out, len, enc);
	case CRYPTO_SM4:
		return rk_crypto_sm4(dev, ctx->mode,
				     ctx->key, ctx->twk_key, ctx->key_len,
				     ctx->iv, ctx->iv_len, in, out, len, enc);
	default:
		return -EINVAL;
	}
}

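/*
 * Usage sketch (illustrative; assumes a cipher_context populated with the
 * fields consumed above, and that .mode carries a mode value understood by
 * RK_GET_RK_MODE(), e.g. a CBC encryption):
 *
 *	cipher_context ctx = {
 *		.algo = CRYPTO_AES,
 *		.mode = mode,			// hypothetical mode value
 *		.key = key, .key_len = AES_KEYSIZE_128,
 *		.iv = iv, .iv_len = AES_BLOCK_SIZE,
 *	};
 *
 *	ret = rockchip_crypto_cipher(dev, &ctx, in, out, len, true);
 */
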
int rk_crypto_mac(struct udevice *dev, u32 algo, u32 mode,
		  const u8 *key, u32 key_len,
		  const u8 *in, u32 len, u8 *tag)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);
	int ret;

	if (!IS_MAC_MODE(rk_mode))
		return -EINVAL;

	if (algo != CRYPTO_AES && algo != CRYPTO_SM4)
		return -EINVAL;

	/* RV1126/RV1109 do not support aes-192 */
#if defined(CONFIG_ROCKCHIP_RV1126)
	if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192)
		return -EINVAL;
#endif

	ret = hw_cipher_init(g_key_chn, key, NULL, key_len, NULL, 0,
			     algo, mode, true);
	if (ret)
		return ret;

	return hw_cipher_crypt(in, NULL, len, NULL, 0,
			       tag, AES_BLOCK_SIZE, mode);
}

int rockchip_crypto_mac(struct udevice *dev, cipher_context *ctx,
			const u8 *in, u32 len, u8 *tag)
{
	return rk_crypto_mac(dev, ctx->algo, ctx->mode,
			     ctx->key, ctx->key_len, in, len, tag);
}

int rk_crypto_ae(struct udevice *dev, u32 algo, u32 mode,
		 const u8 *key, u32 key_len, const u8 *nonce, u32 nonce_len,
		 const u8 *in, u32 len, const u8 *aad, u32 aad_len,
		 u8 *out, u8 *tag)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);
	int ret;

	if (!IS_AE_MODE(rk_mode))
		return -EINVAL;

	if (algo != CRYPTO_AES && algo != CRYPTO_SM4)
		return -EINVAL;

	/* RV1126/RV1109 do not support aes-192 */
#if defined(CONFIG_ROCKCHIP_RV1126)
	if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192)
		return -EINVAL;
#endif

	ret = hw_cipher_init(g_key_chn, key, NULL, key_len, nonce, nonce_len,
			     algo, mode, true);
	if (ret)
		return ret;

	return hw_cipher_crypt(in, out, len, aad, aad_len,
			       tag, AES_BLOCK_SIZE, mode);
}

int rockchip_crypto_ae(struct udevice *dev, cipher_context *ctx,
		       const u8 *in, u32 len, const u8 *aad, u32 aad_len,
		       u8 *out, u8 *tag)
{
	return rk_crypto_ae(dev, ctx->algo, ctx->mode, ctx->key, ctx->key_len,
			    ctx->iv, ctx->iv_len, in, len,
			    aad, aad_len, out, tag);
}
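
/*
 * Usage sketch (illustrative): authenticated encryption, with the context's
 * .iv/.iv_len acting as the nonce and the 16-byte tag written to 'tag':
 *
 *	ret = rockchip_crypto_ae(dev, &ctx, in, len, aad, aad_len, out, tag);
 */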

#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx,
				      u8 *sign, u8 *output)
{
	struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL;
	struct mpa_num *mpa_c = NULL, *mpa_result = NULL;
	u32 n_bits, n_words;
	u32 *rsa_result;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_RSA512 &&
	    ctx->algo != CRYPTO_RSA1024 &&
	    ctx->algo != CRYPTO_RSA2048 &&
	    ctx->algo != CRYPTO_RSA3072 &&
	    ctx->algo != CRYPTO_RSA4096)
		return -EINVAL;

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	rsa_result = malloc(BITS2BYTE(n_bits));
	if (!rsa_result)
		return -ENOMEM;

	memset(rsa_result, 0x00, BITS2BYTE(n_bits));

	ret = rk_mpa_alloc(&mpa_m);
	ret |= rk_mpa_alloc(&mpa_e);
	ret |= rk_mpa_alloc(&mpa_n);
	ret |= rk_mpa_alloc(&mpa_c);
	ret |= rk_mpa_alloc(&mpa_result);
	if (ret)
		goto exit;

	mpa_m->d = (void *)sign;
	mpa_e->d = (void *)ctx->e;
	mpa_n->d = (void *)ctx->n;
	mpa_c->d = (void *)ctx->c;
	mpa_result->d = (void *)rsa_result;

	mpa_m->size = n_words;
	mpa_e->size = n_words;
	mpa_n->size = n_words;
	mpa_c->size = n_words;
	mpa_result->size = n_words;

	ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result);
	if (!ret)
		memcpy(output, rsa_result, BITS2BYTE(n_bits));

exit:
	free(rsa_result);
	rk_mpa_free(&mpa_m);
	rk_mpa_free(&mpa_e);
	rk_mpa_free(&mpa_n);
	rk_mpa_free(&mpa_c);
	rk_mpa_free(&mpa_result);

	return ret;
}
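
/*
 * Usage sketch (illustrative): recover the padded digest from a raw RSA
 * signature. Assumes an rsa_key carrying the modulus n, public exponent e
 * and the pre-computed constant c consumed by rk_exptmod_np() above:
 *
 *	rsa_key key = { .algo = CRYPTO_RSA2048, .n = n, .e = e, .c = c };
 *	u8 em[256];	// BITS2BYTE(2048)
 *
 *	ret = rockchip_crypto_rsa_verify(dev, &key, sign, em);
 */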
#endif

static const struct dm_crypto_ops rockchip_crypto_ops = {
	.capability   = rockchip_crypto_capability,
	.sha_init     = rockchip_crypto_sha_init,
	.sha_update   = rockchip_crypto_sha_update,
	.sha_final    = rockchip_crypto_sha_final,
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	.rsa_verify   = rockchip_crypto_rsa_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	.hmac_init    = rockchip_crypto_hmac_init,
	.hmac_update  = rockchip_crypto_hmac_update,
	.hmac_final   = rockchip_crypto_hmac_final,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	.cipher_crypt = rockchip_crypto_cipher,
	.cipher_mac   = rockchip_crypto_mac,
	.cipher_ae    = rockchip_crypto_ae,
#endif
};

/*
 * Only use the "clocks" property to parse the crypto clock ids, and use
 * rockchip_get_clk(), because we always add a crypto node in the U-Boot
 * dts, and when the kernel dtb is used:
 *
 *   1. there is a cru phandle mismatch between the U-Boot and kernel dtbs;
 *   2. CONFIG_OF_SPL_REMOVE_PROPS removes the clock properties.
 */
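/*
 * Illustrative (hypothetical) crypto node consumed below; the register
 * address, clock phandles and rates are placeholders:
 *
 *	crypto: crypto@ff2b0000 {
 *		compatible = "rockchip,px30-crypto";
 *		reg = <0x0 0xff2b0000 0x0 0x4000>;
 *		clocks = <&cru SCLK_CRYPTO>, <&cru SCLK_CRYPTO_APK>;
 *		clock-frequency = <100000000 100000000>;
 *	};
 */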
static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int len, ret = -EINVAL;

	memset(priv, 0x00, sizeof(*priv));

	priv->reg = dev_read_addr(dev);
	if (priv->reg == FDT_ADDR_T_NONE)
		return -EINVAL;

	crypto_base = priv->reg;

	/* if there is no "clocks" property in the dts, just skip it */
	if (!dev_read_prop(dev, "clocks", &len)) {
		printf("Can't find \"clocks\" property\n");
		return 0;
	}

	priv->clocks = malloc(len);
	if (!priv->clocks)
		return -ENOMEM;

	priv->nclocks = len / sizeof(u32);
	if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks,
			       priv->nclocks)) {
		printf("Can't read \"clocks\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	if (!dev_read_prop(dev, "clock-frequency", &len)) {
		printf("Can't find \"clock-frequency\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	priv->frequencies = malloc(len);
	if (!priv->frequencies) {
		ret = -ENOMEM;
		goto exit;
	}

	priv->nclocks = len / sizeof(u32);
	if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies,
			       priv->nclocks)) {
		printf("Can't read \"clock-frequency\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	return 0;
exit:
	if (priv->clocks)
		free(priv->clocks);

	if (priv->frequencies)
		free(priv->frequencies);

	return ret;
}

static int rk_crypto_set_clk(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct clk clk;
	int i, ret;

	if (!priv->clocks && priv->nclocks == 0)
		return 0;

	for (i = 0; i < priv->nclocks; i++) {
		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0) {
			printf("Failed to get clk index %d, ret=%d\n", i, ret);
			return ret;
		}
		ret = clk_set_rate(&clk, priv->frequencies[i]);
		if (ret < 0) {
			printf("%s: Failed to set clk(%ld): ret=%d\n",
			       __func__, clk.id, ret);
			return ret;
		}
	}

	return 0;
}

static int rockchip_crypto_probe(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rk_crypto_soc_data *sdata;
	int ret = 0;

	sdata = (struct rk_crypto_soc_data *)dev_get_driver_data(dev);
	priv->soc_data = sdata;

	priv->hw_ctx = memalign(LLI_ADDR_ALIGN_SIZE,
				sizeof(struct rk_hash_ctx));
	if (!priv->hw_ctx)
		return -ENOMEM;

	ret = rk_crypto_set_clk(dev);
	if (ret)
		return ret;

	hw_crypto_reset();

	return 0;
}

static const struct rk_crypto_soc_data soc_data_base = {
	.capability = CRYPTO_MD5 |
		      CRYPTO_SHA1 |
		      CRYPTO_SHA256 |
		      CRYPTO_SHA512 |
		      CRYPTO_HMAC_MD5 |
		      CRYPTO_HMAC_SHA1 |
		      CRYPTO_HMAC_SHA256 |
		      CRYPTO_HMAC_SHA512 |
		      CRYPTO_RSA512 |
		      CRYPTO_RSA1024 |
		      CRYPTO_RSA2048 |
		      CRYPTO_RSA3072 |
		      CRYPTO_RSA4096 |
		      CRYPTO_DES |
		      CRYPTO_AES,
};

static const struct rk_crypto_soc_data soc_data_base_sm = {
	.capability = CRYPTO_MD5 |
		      CRYPTO_SHA1 |
		      CRYPTO_SHA256 |
		      CRYPTO_SHA512 |
		      CRYPTO_SM3 |
		      CRYPTO_HMAC_MD5 |
		      CRYPTO_HMAC_SHA1 |
		      CRYPTO_HMAC_SHA256 |
		      CRYPTO_HMAC_SHA512 |
		      CRYPTO_HMAC_SM3 |
		      CRYPTO_RSA512 |
		      CRYPTO_RSA1024 |
		      CRYPTO_RSA2048 |
		      CRYPTO_RSA3072 |
		      CRYPTO_RSA4096 |
		      CRYPTO_DES |
		      CRYPTO_AES |
		      CRYPTO_SM4,
};

static const struct rk_crypto_soc_data soc_data_rk1808 = {
	.capability = CRYPTO_MD5 |
		      CRYPTO_SHA1 |
		      CRYPTO_SHA256 |
		      CRYPTO_HMAC_MD5 |
		      CRYPTO_HMAC_SHA1 |
		      CRYPTO_HMAC_SHA256 |
		      CRYPTO_RSA512 |
		      CRYPTO_RSA1024 |
		      CRYPTO_RSA2048 |
		      CRYPTO_RSA3072 |
		      CRYPTO_RSA4096,
};

static const struct udevice_id rockchip_crypto_ids[] = {
	{
		.compatible = "rockchip,px30-crypto",
		.data = (ulong)&soc_data_base
	},
	{
		.compatible = "rockchip,rk1808-crypto",
		.data = (ulong)&soc_data_rk1808
	},
	{
		.compatible = "rockchip,rk3308-crypto",
		.data = (ulong)&soc_data_base
	},
	{
		.compatible = "rockchip,rv1126-crypto",
		.data = (ulong)&soc_data_base_sm
	},
	{
		.compatible = "rockchip,rk3568-crypto",
		.data = (ulong)&soc_data_base_sm
	},
	{
		.compatible = "rockchip,rk3588-crypto",
		.data = (ulong)&soc_data_base_sm
	},
	{ }
};

U_BOOT_DRIVER(rockchip_crypto_v2) = {
	.name		= "rockchip_crypto_v2",
	.id		= UCLASS_CRYPTO,
	.of_match	= rockchip_crypto_ids,
	.ops		= &rockchip_crypto_ops,
	.probe		= rockchip_crypto_probe,
	.ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv),
};