xref: /rk3399_rockchip-uboot/drivers/crypto/rockchip/crypto_v2.c (revision 8c7c24c01a6f59f93e6564743e0e75c973d73387)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4  */
5 
6 #include <common.h>
7 #include <clk.h>
8 #include <crypto.h>
9 #include <dm.h>
10 #include <asm/io.h>
11 #include <clk-uclass.h>
12 #include <asm/arch/hardware.h>
13 #include <asm/arch/clock.h>
14 #include <rockchip/crypto_hash_cache.h>
15 #include <rockchip/crypto_v2.h>
16 #include <rockchip/crypto_v2_pka.h>
17 
18 #define	RK_HASH_CTX_MAGIC		0x1A1A1A1A
19 
20 #define CRYPTO_MAJOR_VER(ver)		((ver) & 0x0f000000)
21 
22 #define CRYPTO_MAJOR_VER_3		0x03000000
23 #define CRYPTO_MAJOR_VER_4		0x04000000
24 
25 #ifdef DEBUG
26 #define IMSG(format, ...) printf("[%s, %05d]-trace: " format "\n", \
27 				 __func__, __LINE__, ##__VA_ARGS__)
28 #else
29 #define IMSG(format, ...)
30 #endif
31 
32 struct crypto_lli_desc {
33 	u32 src_addr;
34 	u32 src_len;
35 	u32 dst_addr;
36 	u32 dst_len;
37 	u32 user_define;
38 	u32 reserve;
39 	u32 dma_ctrl;
40 	u32 next_addr;
41 };
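
/*
 * crypto_lli_desc above is the hardware DMA link-list item: src/dst describe
 * one transfer, user_define carries the LLI_USER_* flags plus the key channel
 * (see hw_cipher_crypt()), and next_addr chains to the next item. A minimal
 * single-entry chain, as built throughout this file, would look like:
 *
 *	struct crypto_lli_desc desc;
 *
 *	memset(&desc, 0x00, sizeof(desc));
 *	desc.src_addr    = (u32)virt_to_phys(src);
 *	desc.src_len     = len;
 *	desc.dma_ctrl    = LLI_DMA_CTRL_SRC_DONE | LLI_DMA_CTRL_LAST;
 *	desc.user_define = LLI_USER_CIPHER_START | LLI_USER_STRING_START |
 *			   LLI_USER_STRING_LAST;
 *	crypto_write((u32)virt_to_phys(&desc), CRYPTO_DMA_LLI_ADDR);
 */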
42 
43 struct rk_hash_ctx {
44 	struct crypto_lli_desc		data_lli;	/* lli desc */
45 	struct crypto_hash_cache	*hash_cache;
46 	u32				magic;		/* to check ctx */
47 	u32				algo;		/* hash algo */
48 	u8				digest_size;	/* hash out length */
49 	u8				reserved[3];
50 };
51 
52 struct rk_crypto_soc_data {
53 	u32 capability;
54 	u32 (*dynamic_cap)(void);
55 };
56 
57 struct rockchip_crypto_priv {
58 	fdt_addr_t			reg;
59 	u32				frequency;
60 	char				*clocks;
61 	u32				*frequencies;
62 	u32				nclocks;
63 	u32				freq_nclocks;
64 	u32				length;
65 	struct rk_hash_ctx		*hw_ctx;
66 	struct rk_crypto_soc_data	*soc_data;
67 };
68 
69 #define LLI_ADDR_ALIGN_SIZE	8
70 #define DATA_ADDR_ALIGN_SIZE	8
71 #define DATA_LEN_ALIGN_SIZE	64
72 
73 /* crypto timeout is 500ms; each hash update is capped at 32M so one pass completes in time */
74 #define HASH_UPDATE_LIMIT	(32 * 1024 * 1024)
75 #define RK_CRYPTO_TIMEOUT	500000
76 
77 #define RK_POLL_TIMEOUT(condition, timeout) \
78 ({ \
79 	int time_out = timeout; \
80 	while (condition) { \
81 		if (--time_out <= 0) { \
82 			debug("[%s] %d: time out!\n", __func__,\
83 				__LINE__); \
84 			break; \
85 		} \
86 		udelay(1); \
87 	} \
88 	(time_out <= 0) ? -ETIMEDOUT : 0; \
89 })
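
/*
 * Usage sketch (illustrative, not from the driver): RK_POLL_TIMEOUT
 * busy-waits on a condition in 1us steps and evaluates to 0 on success or
 * -ETIMEDOUT on timeout, so any status register can be polled with it:
 *
 *	int ret;
 *
 *	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL), RK_CRYPTO_TIMEOUT);
 *	if (ret == -ETIMEDOUT)
 *		debug("reset still pending\n");
 */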
90 
91 #define WAIT_TAG_VALID(channel, timeout) ({ \
92 	u32 tag_mask = CRYPTO_CH0_TAG_VALID << (channel);\
93 	int ret = 0;\
94 	if (is_check_tag_valid()) { \
95 		ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_TAG_VALID) & tag_mask),\
96 				      timeout);\
97 	} \
98 	crypto_write(crypto_read(CRYPTO_TAG_VALID) & tag_mask, CRYPTO_TAG_VALID);\
99 	ret;\
100 })
101 
102 #define virt_to_phys(addr)		(((unsigned long)addr) & 0xffffffff)
103 #define phys_to_virt(addr, area)	((unsigned long)addr)
104 
105 #define align_malloc(bytes, alignment)	memalign(alignment, bytes)
106 #define align_free(addr)		do { if (addr) free(addr); } while (0)
107 
108 #define ROUNDUP(size, alignment)	round_up(size, alignment)
109 #define cache_op_inner(type, addr, size) \
110 					crypto_flush_cacheline((ulong)addr, size)
111 
112 #define IS_NEED_IV(rk_mode) ((rk_mode) != RK_MODE_ECB && \
113 			     (rk_mode) != RK_MODE_CMAC && \
114 			     (rk_mode) != RK_MODE_CBC_MAC)
115 
116 #define IS_NEED_TAG(rk_mode) ((rk_mode) == RK_MODE_CMAC || \
117 			      (rk_mode) == RK_MODE_CBC_MAC || \
118 			      (rk_mode) == RK_MODE_CCM || \
119 			      (rk_mode) == RK_MODE_GCM)
120 
121 #define IS_MAC_MODE(rk_mode) ((rk_mode) == RK_MODE_CMAC || \
122 			      (rk_mode) == RK_MODE_CBC_MAC)
123 
124 #define IS_AE_MODE(rk_mode) ((rk_mode) == RK_MODE_CCM || \
125 			     (rk_mode) == RK_MODE_GCM)
126 
127 fdt_addr_t crypto_base;
128 static uint32_t g_crypto_version;
129 
130 static inline bool is_check_hash_valid(void)
131 {
132 	/* crypto < v4 needs to check hash valid */
133 	return CRYPTO_MAJOR_VER(g_crypto_version) < CRYPTO_MAJOR_VER_4;
134 }
135 
136 static inline bool is_check_tag_valid(void)
137 {
138 	/* crypto < v4 needs to check tag valid */
139 	return CRYPTO_MAJOR_VER(g_crypto_version) < CRYPTO_MAJOR_VER_4;
140 }
141 
142 static inline void word2byte_be(u32 word, u8 *ch)
143 {
144 	ch[0] = (word >> 24) & 0xff;
145 	ch[1] = (word >> 16) & 0xff;
146 	ch[2] = (word >> 8) & 0xff;
147 	ch[3] = (word >> 0) & 0xff;
148 }
149 
150 static inline u32 byte2word_be(const u8 *ch)
151 {
152 	return (*ch << 24) + (*(ch + 1) << 16) + (*(ch + 2) << 8) + *(ch + 3);
153 }
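
/*
 * Worked example (illustrative): the two helpers above convert between
 * big-endian byte streams and native u32 words and are exact inverses:
 *
 *	const u8 in[4] = { 0x12, 0x34, 0x56, 0x78 };
 *	u8 out[4];
 *	u32 w;
 *
 *	w = byte2word_be(in);		// w == 0x12345678
 *	word2byte_be(w, out);		// out[] == { 0x12, 0x34, 0x56, 0x78 }
 */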
154 
155 static inline void clear_regs(u32 base, u32 words)
156 {
157 	int i;
158 
159 	/* clear the output registers */
160 	for (i = 0; i < words; i++)
161 		crypto_write(0, base + 4 * i);
162 }
163 
164 static inline void clear_key_regs(void)
165 {
166 	clear_regs(CRYPTO_CH0_KEY_0, CRYPTO_KEY_CHANNEL_NUM * 4);
167 }
168 
169 static inline void read_regs(u32 base, u8 *data, u32 data_len)
170 {
171 	u8 tmp_buf[4];
172 	u32 i;
173 
174 	for (i = 0; i < data_len / 4; i++)
175 		word2byte_be(crypto_read(base + i * 4),
176 			     data + i * 4);
177 
178 	if (data_len % 4) {
179 		word2byte_be(crypto_read(base + i * 4), tmp_buf);
180 		memcpy(data + i * 4, tmp_buf, data_len % 4);
181 	}
182 }
183 
184 static inline void write_regs(u32 base, const u8 *data, u32 data_len)
185 {
186 	u8 tmp_buf[4];
187 	u32 i;
188 
189 	for (i = 0; i < data_len / 4; i++, base += 4)
190 		crypto_write(byte2word_be(data + i * 4), base);
191 
192 	if (data_len % 4) {
193 		memset(tmp_buf, 0x00, sizeof(tmp_buf));
194 		memcpy((u8 *)tmp_buf, data + i * 4, data_len % 4);
195 		crypto_write(byte2word_be(tmp_buf), base);
196 	}
197 }
198 
199 static inline void write_key_reg(u32 chn, const u8 *key, u32 key_len)
200 {
201 	write_regs(CRYPTO_CH0_KEY_0 + chn * 0x10, key, key_len);
202 }
203 
204 static inline void set_iv_reg(u32 chn, const u8 *iv, u32 iv_len)
205 {
206 	u32 base_iv;
207 
208 	base_iv = CRYPTO_CH0_IV_0 + chn * 0x10;
209 
210 	/* clear iv */
211 	clear_regs(base_iv, 4);
212 
213 	if (!iv || iv_len == 0)
214 		return;
215 
216 	write_regs(base_iv, iv, iv_len);
217 
218 	crypto_write(iv_len, CRYPTO_CH0_IV_LEN_0 + 4 * chn);
219 }
220 
221 static inline void get_iv_reg(u32 chn, u8 *iv, u32 iv_len)
222 {
223 	u32 base_iv;
224 
225 	base_iv = CRYPTO_CH0_IV_0 + chn * 0x10;
226 
227 	read_regs(base_iv, iv, iv_len);
228 }
229 
230 static inline void get_tag_from_reg(u32 chn, u8 *tag, u32 tag_len)
231 {
232 	u32 i;
233 	u32 chn_base = CRYPTO_CH0_TAG_0 + 0x10 * chn;
234 
235 	for (i = 0; i < tag_len / 4; i++, chn_base += 4)
236 		word2byte_be(crypto_read(chn_base), tag + 4 * i);
237 }
238 
239 static int rk_crypto_do_enable_clk(struct udevice *dev, int enable)
240 {
241 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
242 	struct clk clk;
243 	int i, ret;
244 
245 	for (i = 0; i < priv->nclocks; i++) {
246 		ret = clk_get_by_index(dev, i, &clk);
247 		if (ret < 0) {
248 			printf("Failed to get clk index %d, ret=%d\n", i, ret);
249 			return ret;
250 		}
251 
252 		if (enable)
253 			ret = clk_enable(&clk);
254 		else
255 			ret = clk_disable(&clk);
256 		if (ret < 0 && ret != -ENOSYS) {
257 			printf("Failed to %s clk(%ld): ret=%d\n",
258 			       enable ? "enable" : "disable", clk.id, ret);
259 			return ret;
260 		}
261 	}
262 
263 	return 0;
264 }
265 
266 static int rk_crypto_enable_clk(struct udevice *dev)
267 {
268 	return rk_crypto_do_enable_clk(dev, 1);
269 }
270 
271 static int rk_crypto_disable_clk(struct udevice *dev)
272 {
273 	return rk_crypto_do_enable_clk(dev, 0);
274 }
275 
276 static u32 crypto_v3_dynamic_cap(void)
277 {
278 	u32 capability = 0;
279 	u32 ver_reg, i;
280 	struct cap_map {
281 		u32 ver_offset;
282 		u32 mask;
283 		u32 cap_bit;
284 	};
285 	const struct cap_map cap_tbl[] = {
286 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_MD5_FLAG,    CRYPTO_MD5},
287 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA1_FLAG,   CRYPTO_SHA1},
288 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA256_FLAG, CRYPTO_SHA256},
289 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA512_FLAG, CRYPTO_SHA512},
290 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SM3_FLAG,    CRYPTO_SM3},
291 
292 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_MD5_FLAG,    CRYPTO_HMAC_MD5},
293 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA1_FLAG,   CRYPTO_HMAC_SHA1},
294 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA256_FLAG, CRYPTO_HMAC_SHA256},
295 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA512_FLAG, CRYPTO_HMAC_SHA512},
296 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SM3_FLAG,    CRYPTO_HMAC_SM3},
297 
298 	{CRYPTO_AES_VERSION,  CRYPTO_AES256_FLAG,      CRYPTO_AES},
299 	{CRYPTO_DES_VERSION,  CRYPTO_TDES_FLAG,        CRYPTO_DES},
300 	{CRYPTO_SM4_VERSION,  CRYPTO_ECB_FLAG,         CRYPTO_SM4},
301 	};
302 
303 	/* rsa */
304 	capability = CRYPTO_RSA512 |
305 		     CRYPTO_RSA1024 |
306 		     CRYPTO_RSA2048 |
307 		     CRYPTO_RSA3072 |
308 		     CRYPTO_RSA4096;
309 
310 	for (i = 0; i < ARRAY_SIZE(cap_tbl); i++) {
311 		ver_reg = crypto_read(cap_tbl[i].ver_offset);
312 
313 		if ((ver_reg & cap_tbl[i].mask) == cap_tbl[i].mask)
314 			capability |= cap_tbl[i].cap_bit;
315 	}
316 
317 	return capability;
318 }
319 
320 static int hw_crypto_reset(void)
321 {
322 	u32 val = 0, mask = 0;
323 	int ret;
324 
325 	val = CRYPTO_SW_PKA_RESET | CRYPTO_SW_CC_RESET;
326 	mask = val << CRYPTO_WRITE_MASK_SHIFT;
327 
328 	/* reset the PKA and crypto modules */
329 	crypto_write(val | mask, CRYPTO_RST_CTL);
330 
331 	/* wait for reset to complete */
332 	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL), RK_CRYPTO_TIMEOUT);
333 
334 	g_crypto_version = crypto_read(CRYPTO_CRYPTO_VERSION_NEW);
335 
336 	return ret;
337 }
338 
339 static void hw_hash_clean_ctx(struct rk_hash_ctx *ctx)
340 {
341 	/* clear hash status */
342 	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);
343 
344 	assert(ctx);
345 	assert(ctx->magic == RK_HASH_CTX_MAGIC);
346 
347 	crypto_hash_cache_free(ctx->hash_cache);
348 
349 	memset(ctx, 0x00, sizeof(*ctx));
350 }
351 
352 static int rk_hash_init(void *hw_ctx, u32 algo)
353 {
354 	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)hw_ctx;
355 	u32 reg_ctrl = 0;
356 	int ret;
357 
358 	if (!tmp_ctx)
359 		return -EINVAL;
360 
361 	reg_ctrl = CRYPTO_SW_CC_RESET;
362 	crypto_write(reg_ctrl | (reg_ctrl << CRYPTO_WRITE_MASK_SHIFT),
363 		     CRYPTO_RST_CTL);
364 
365 	/* wait for reset to complete */
366 	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL),
367 			      RK_CRYPTO_TIMEOUT);
368 
369 	reg_ctrl = 0;
370 	tmp_ctx->algo = algo;
371 	switch (algo) {
372 	case CRYPTO_MD5:
373 	case CRYPTO_HMAC_MD5:
374 		reg_ctrl |= CRYPTO_MODE_MD5;
375 		tmp_ctx->digest_size = 16;
376 		break;
377 	case CRYPTO_SHA1:
378 	case CRYPTO_HMAC_SHA1:
379 		reg_ctrl |= CRYPTO_MODE_SHA1;
380 		tmp_ctx->digest_size = 20;
381 		break;
382 	case CRYPTO_SHA256:
383 	case CRYPTO_HMAC_SHA256:
384 		reg_ctrl |= CRYPTO_MODE_SHA256;
385 		tmp_ctx->digest_size = 32;
386 		break;
387 	case CRYPTO_SHA512:
388 	case CRYPTO_HMAC_SHA512:
389 		reg_ctrl |= CRYPTO_MODE_SHA512;
390 		tmp_ctx->digest_size = 64;
391 		break;
392 	case CRYPTO_SM3:
393 	case CRYPTO_HMAC_SM3:
394 		reg_ctrl |= CRYPTO_MODE_SM3;
395 		tmp_ctx->digest_size = 32;
396 		break;
397 	default:
398 		ret = -EINVAL;
399 		goto exit;
400 	}
401 
402 	/* enable hardware padding */
403 	reg_ctrl |= CRYPTO_HW_PAD_ENABLE;
404 	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL);
405 
406 	/* FIFO input and output data byte swap */
407 	/* such as B0, B1, B2, B3 -> B3, B2, B1, B0 */
408 	reg_ctrl = CRYPTO_DOUT_BYTESWAP | CRYPTO_DOIN_BYTESWAP;
409 	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_FIFO_CTL);
410 
411 	/* disable all DMA interrupts; completion status is polled */
412 	crypto_write(0, CRYPTO_DMA_INT_EN);
413 
414 	tmp_ctx->magic = RK_HASH_CTX_MAGIC;
415 
416 	return 0;
417 exit:
418 	/* clear hash setting if init failed */
419 	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);
420 
421 	return ret;
422 }
423 
424 static int rk_hash_direct_calc(void *hw_data, const u8 *data,
425 			       u32 data_len, u8 *started_flag, u8 is_last)
426 {
427 	struct rockchip_crypto_priv *priv = hw_data;
428 	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;
429 	struct crypto_lli_desc *lli = &hash_ctx->data_lli;
430 	int ret = -EINVAL;
431 	u32 tmp = 0, mask = 0;
432 
433 	assert(IS_ALIGNED((ulong)data, DATA_ADDR_ALIGN_SIZE));
434 	assert(is_last || IS_ALIGNED(data_len, DATA_LEN_ALIGN_SIZE));
435 
436 	debug("%s: data = %p, len = %u, s = %x, l = %x\n",
437 	      __func__, data, data_len, *started_flag, is_last);
438 
439 	memset(lli, 0x00, sizeof(*lli));
440 	lli->src_addr = (u32)virt_to_phys(data);
441 	lli->src_len = data_len;
442 	lli->dma_ctrl = LLI_DMA_CTRL_SRC_DONE;
443 
444 	if (is_last) {
445 		lli->user_define |= LLI_USER_STRING_LAST;
446 		lli->dma_ctrl |= LLI_DMA_CTRL_LAST;
447 	} else {
448 		lli->next_addr = (u32)virt_to_phys(lli);
449 		lli->dma_ctrl |= LLI_DMA_CTRL_PAUSE;
450 	}
451 
452 	if (!(*started_flag)) {
453 		lli->user_define |=
454 			(LLI_USER_STRING_START | LLI_USER_CIPHER_START);
455 		crypto_write((u32)virt_to_phys(lli), CRYPTO_DMA_LLI_ADDR);
456 		crypto_write((CRYPTO_HASH_ENABLE << CRYPTO_WRITE_MASK_SHIFT) |
457 			     CRYPTO_HASH_ENABLE, CRYPTO_HASH_CTL);
458 		tmp = CRYPTO_DMA_START;
459 		*started_flag = 1;
460 	} else {
461 		tmp = CRYPTO_DMA_RESTART;
462 	}
463 
464 	/* flush cache */
465 	crypto_flush_cacheline((ulong)lli, sizeof(*lli));
466 	crypto_flush_cacheline((ulong)data, data_len);
467 
468 	/* start the calculation */
469 	crypto_write(tmp << CRYPTO_WRITE_MASK_SHIFT | tmp,
470 		     CRYPTO_DMA_CTL);
471 
472 	/* mask CRYPTO_SYNC_LOCKSTEP_INT_ST flag */
473 	mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST);
474 
475 	/* wait for the transfer to complete */
476 	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask),
477 			      RK_CRYPTO_TIMEOUT);
478 
479 	/* clear interrupt status */
480 	tmp = crypto_read(CRYPTO_DMA_INT_ST);
481 	crypto_write(tmp, CRYPTO_DMA_INT_ST);
482 
483 	if ((tmp & mask) != CRYPTO_SRC_ITEM_DONE_INT_ST &&
484 	    (tmp & mask) != CRYPTO_ZERO_LEN_INT_ST) {
485 		ret = -EFAULT;
486 		debug("[%s] %d: CRYPTO_DMA_INT_ST = 0x%x\n",
487 		      __func__, __LINE__, tmp);
488 		goto exit;
489 	}
490 
491 	priv->length += data_len;
492 exit:
493 	return ret;
494 }
495 
496 int rk_hash_update(void *ctx, const u8 *data, u32 data_len)
497 {
498 	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
499 	int ret = -EINVAL;
500 
501 	debug("\n");
502 	if (!tmp_ctx || !data)
503 		goto exit;
504 
505 	if (tmp_ctx->digest_size == 0 || tmp_ctx->magic != RK_HASH_CTX_MAGIC)
506 		goto exit;
507 
508 	ret = crypto_hash_update_with_cache(tmp_ctx->hash_cache,
509 					    data, data_len);
510 
511 exit:
512 	/* clean up the hash context on error */
513 	if (ret && tmp_ctx)
514 		hw_hash_clean_ctx(tmp_ctx);
515 
516 	return ret;
517 }
518 
519 int rk_hash_final(void *ctx, u8 *digest, size_t len)
520 {
521 	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
522 	int ret = -EINVAL;
523 
524 	if (!digest)
525 		goto exit;
526 
527 	if (!tmp_ctx ||
528 	    tmp_ctx->digest_size == 0 ||
529 	    len > tmp_ctx->digest_size ||
530 	    tmp_ctx->magic != RK_HASH_CTX_MAGIC) {
531 		goto exit;
532 	}
533 
534 	ret = 0;
534 	if (is_check_hash_valid()) {
535 		/* wait until the hash value is ready */
536 		ret = RK_POLL_TIMEOUT(!crypto_read(CRYPTO_HASH_VALID),
537 				      RK_CRYPTO_TIMEOUT);
538 	}
539 
540 	read_regs(CRYPTO_HASH_DOUT_0, digest, len);
541 
542 	/* clear hash status */
543 	crypto_write(CRYPTO_HASH_IS_VALID, CRYPTO_HASH_VALID);
544 	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);
545 
546 exit:
547 
548 	return ret;
549 }
550 
551 static u32 rockchip_crypto_capability(struct udevice *dev)
552 {
553 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
554 	u32 capability, mask = 0;
555 
556 	capability = priv->soc_data->capability;
557 
558 #if !(CONFIG_IS_ENABLED(ROCKCHIP_CIPHER))
559 	mask |= (CRYPTO_DES | CRYPTO_AES | CRYPTO_SM4);
560 #endif
561 
562 #if !(CONFIG_IS_ENABLED(ROCKCHIP_HMAC))
563 	mask |= (CRYPTO_HMAC_MD5 | CRYPTO_HMAC_SHA1 | CRYPTO_HMAC_SHA256 |
564 			 CRYPTO_HMAC_SHA512 | CRYPTO_HMAC_SM3);
565 #endif
566 
567 #if !(CONFIG_IS_ENABLED(ROCKCHIP_RSA))
568 	mask |= (CRYPTO_RSA512 | CRYPTO_RSA1024 | CRYPTO_RSA2048 |
569 			 CRYPTO_RSA3072 | CRYPTO_RSA4096);
570 #endif
571 
572 	return capability & (~mask);
573 }
574 
575 static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
576 {
577 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
578 	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;
579 	int ret = 0;
580 
581 	if (!ctx)
582 		return -EINVAL;
583 
584 	memset(hash_ctx, 0x00, sizeof(*hash_ctx));
585 
586 	priv->length = 0;
587 
588 	hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
589 						       priv, ctx->length,
590 						       DATA_ADDR_ALIGN_SIZE,
591 						       DATA_LEN_ALIGN_SIZE);
592 	if (!hash_ctx->hash_cache)
593 		return -EFAULT;
594 
595 	rk_crypto_enable_clk(dev);
596 	ret = rk_hash_init(hash_ctx, ctx->algo);
597 	if (ret)
598 		rk_crypto_disable_clk(dev);
599 
600 	return ret;
601 }
602 
603 static int rockchip_crypto_sha_update(struct udevice *dev,
604 				      u32 *input, u32 len)
605 {
606 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
607 	int ret, i;
608 	u8 *p;
609 
610 	if (!len) {
611 		ret = -EINVAL;
612 		goto exit;
613 	}
614 
615 	p = (u8 *)input;
616 
617 	for (i = 0; i < len / HASH_UPDATE_LIMIT; i++, p += HASH_UPDATE_LIMIT) {
618 		ret = rk_hash_update(priv->hw_ctx, p, HASH_UPDATE_LIMIT);
619 		if (ret)
620 			goto exit;
621 	}
622 
623 	if (len % HASH_UPDATE_LIMIT)
624 		ret = rk_hash_update(priv->hw_ctx, p, len % HASH_UPDATE_LIMIT);
625 
626 exit:
627 	if (ret)
628 		rk_crypto_disable_clk(dev);
629 
630 	return ret;
631 }
632 
633 static int rockchip_crypto_sha_final(struct udevice *dev,
634 				     sha_context *ctx, u8 *output)
635 {
636 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
637 	u32 nbits;
638 	int ret;
639 
640 	nbits = crypto_algo_nbits(ctx->algo);
641 
642 	if (priv->length != ctx->length) {
643 		printf("total length(0x%08x) != init length(0x%08x)!\n",
644 		       priv->length, ctx->length);
645 		ret = -EIO;
646 		goto exit;
647 	}
648 
649 	ret = rk_hash_final(priv->hw_ctx, (u8 *)output, BITS2BYTE(nbits));
650 
651 exit:
652 	hw_hash_clean_ctx(priv->hw_ctx);
653 	rk_crypto_disable_clk(dev);
654 
655 	return ret;
656 }
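
/*
 * Typical flow through the three sha ops above (illustrative sketch; the
 * sha_context fields are those consumed in this file): the total byte count
 * must be declared in ctx.length up front and must match what update feeds
 * in, since rockchip_crypto_sha_final() compares the two:
 *
 *	sha_context ctx;
 *	u8 digest[32];
 *
 *	ctx.algo = CRYPTO_SHA256;
 *	ctx.length = total_len;
 *	rockchip_crypto_sha_init(dev, &ctx);
 *	rockchip_crypto_sha_update(dev, (u32 *)buf, total_len);
 *	rockchip_crypto_sha_final(dev, &ctx, digest);
 */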
657 
658 #if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
659 int rk_hmac_init(void *hw_ctx, u32 algo, u8 *key, u32 key_len)
660 {
661 	u32 reg_ctrl = 0;
662 	int ret;
663 
664 	if (!key || !key_len || key_len > 64)
665 		return -EINVAL;
666 
667 	clear_key_regs();
668 
669 	write_key_reg(0, key, key_len);
670 
671 	ret = rk_hash_init(hw_ctx, algo);
672 	if (ret)
673 		return ret;
674 
675 	reg_ctrl = crypto_read(CRYPTO_HASH_CTL) | CRYPTO_HMAC_ENABLE;
676 	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL);
677 
678 	return ret;
679 }
680 
681 static int rockchip_crypto_hmac_init(struct udevice *dev,
682 				     sha_context *ctx, u8 *key, u32 key_len)
683 {
684 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
685 	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;
686 	int ret = 0;
687 
688 	if (!ctx)
689 		return -EINVAL;
690 
691 	memset(hash_ctx, 0x00, sizeof(*hash_ctx));
692 
693 	priv->length = 0;
694 
695 	hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
696 						       priv, ctx->length,
697 						       DATA_ADDR_ALIGN_SIZE,
698 						       DATA_LEN_ALIGN_SIZE);
699 	if (!hash_ctx->hash_cache)
700 		return -EFAULT;
701 
702 	rk_crypto_enable_clk(dev);
703 	ret = rk_hmac_init(priv->hw_ctx, ctx->algo, key, key_len);
704 	if (ret)
705 		rk_crypto_disable_clk(dev);
706 
707 	return ret;
708 }
709 
710 static int rockchip_crypto_hmac_update(struct udevice *dev,
711 				       u32 *input, u32 len)
712 {
713 	return rockchip_crypto_sha_update(dev, input, len);
714 }
715 
716 static int rockchip_crypto_hmac_final(struct udevice *dev,
717 				      sha_context *ctx, u8 *output)
718 {
719 	return rockchip_crypto_sha_final(dev, ctx, output);
720 }
721 
722 #endif
723 
724 #if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
725 static u8 g_key_chn;
726 
727 static const u32 rk_mode2bc_mode[RK_MODE_MAX] = {
728 	[RK_MODE_ECB] = CRYPTO_BC_ECB,
729 	[RK_MODE_CBC] = CRYPTO_BC_CBC,
730 	[RK_MODE_CTS] = CRYPTO_BC_CTS,
731 	[RK_MODE_CTR] = CRYPTO_BC_CTR,
732 	[RK_MODE_CFB] = CRYPTO_BC_CFB,
733 	[RK_MODE_OFB] = CRYPTO_BC_OFB,
734 	[RK_MODE_XTS] = CRYPTO_BC_XTS,
735 	[RK_MODE_CCM] = CRYPTO_BC_CCM,
736 	[RK_MODE_GCM] = CRYPTO_BC_GCM,
737 	[RK_MODE_CMAC] = CRYPTO_BC_CMAC,
738 	[RK_MODE_CBC_MAC] = CRYPTO_BC_CBC_MAC,
739 };
740 
741 static inline void set_pc_len_reg(u32 chn, u64 pc_len)
742 {
743 	u32 chn_base = CRYPTO_CH0_PC_LEN_0 + chn * 0x08;
744 
745 	crypto_write(pc_len & 0xffffffff, chn_base);
746 	crypto_write(pc_len >> 32, chn_base + 4);
747 }
748 
749 static inline void set_aad_len_reg(u32 chn, u64 pc_len)
750 {
751 	u32 chn_base = CRYPTO_CH0_AAD_LEN_0 + chn * 0x08;
752 
753 	crypto_write(pc_len & 0xffffffff, chn_base);
754 	crypto_write(pc_len >> 32, chn_base + 4);
755 }
756 
757 static inline bool is_des_mode(u32 rk_mode)
758 {
759 	return (rk_mode == RK_MODE_ECB ||
760 		rk_mode == RK_MODE_CBC ||
761 		rk_mode == RK_MODE_CFB ||
762 		rk_mode == RK_MODE_OFB);
763 }
764 
765 static void dump_crypto_state(struct crypto_lli_desc *desc,
766 			      u32 tmp, u32 expt_int,
767 			      const u8 *in, const u8 *out,
768 			      u32 len, int ret)
769 {
770 	IMSG("%s\n", ret == -ETIMEDOUT ? "timeout" : "mismatch");
771 
772 	IMSG("CRYPTO_DMA_INT_ST = %08x, expect_int = %08x\n",
773 	     tmp, expt_int);
774 	IMSG("data desc		= %p\n", desc);
775 	IMSG("\taddr_in		= [%08x <=> %08x]\n",
776 	     desc->src_addr, (u32)virt_to_phys(in));
777 	IMSG("\taddr_out	= [%08x <=> %08x]\n",
778 	     desc->dst_addr, (u32)virt_to_phys(out));
779 	IMSG("\tsrc_len		= [%08x <=> %08x]\n",
780 	     desc->src_len, (u32)len);
781 	IMSG("\tdst_len		= %08x\n", desc->dst_len);
782 	IMSG("\tdma_ctl		= %08x\n", desc->dma_ctrl);
783 	IMSG("\tuser_define	= %08x\n", desc->user_define);
784 
785 	IMSG("\n\nDMA CRYPTO_DMA_LLI_ADDR status = %08x\n",
786 	     crypto_read(CRYPTO_DMA_LLI_ADDR));
787 	IMSG("DMA CRYPTO_DMA_ST status = %08x\n",
788 	     crypto_read(CRYPTO_DMA_ST));
789 	IMSG("DMA CRYPTO_DMA_STATE status = %08x\n",
790 	     crypto_read(CRYPTO_DMA_STATE));
791 	IMSG("DMA CRYPTO_DMA_LLI_RADDR status = %08x\n",
792 	     crypto_read(CRYPTO_DMA_LLI_RADDR));
793 	IMSG("DMA CRYPTO_DMA_SRC_RADDR status = %08x\n",
794 	     crypto_read(CRYPTO_DMA_SRC_RADDR));
795 	IMSG("DMA CRYPTO_DMA_DST_RADDR status = %08x\n",
796 	     crypto_read(CRYPTO_DMA_DST_RADDR));
797 	IMSG("DMA CRYPTO_CIPHER_ST status = %08x\n",
798 	     crypto_read(CRYPTO_CIPHER_ST));
799 	IMSG("DMA CRYPTO_CIPHER_STATE status = %08x\n",
800 	     crypto_read(CRYPTO_CIPHER_STATE));
801 	IMSG("DMA CRYPTO_TAG_VALID status = %08x\n",
802 	     crypto_read(CRYPTO_TAG_VALID));
803 	IMSG("LOCKSTEP status = %08x\n\n",
804 	     crypto_read(0x618));
805 
806 	IMSG("dst %d bytes not transferred\n",
807 	     desc->dst_addr + desc->dst_len -
808 	     crypto_read(CRYPTO_DMA_DST_RADDR));
809 }
810 
811 static int ccm128_set_iv_reg(u32 chn, const u8 *nonce, u32 nlen)
812 {
813 	u8 iv_buf[AES_BLOCK_SIZE];
814 	u32 L;
815 
816 	memset(iv_buf, 0x00, sizeof(iv_buf));
817 
818 	L = 15 - nlen;
819 	iv_buf[0] = ((u8)(L - 1) & 7);
820 
821 	/* the L parameter */
822 	L = iv_buf[0] & 7;
823 
824 	/* nonce is too short */
825 	if (nlen < (14 - L))
826 		return -EINVAL;
827 
828 	/* clear aad flag */
829 	iv_buf[0] &= ~0x40;
830 	memcpy(&iv_buf[1], nonce, 14 - L);
831 
832 	set_iv_reg(chn, iv_buf, AES_BLOCK_SIZE);
833 
834 	return 0;
835 }
836 
837 static void ccm_aad_padding(u32 aad_len, u8 *padding, u32 *padding_size)
838 {
839 	u32 i;
840 
841 	if (aad_len == 0) {
842 		*padding_size = 0;
843 		return;
844 	}
845 
846 	i = aad_len < (0x10000 - 0x100) ? 2 : 6;
847 
848 	if (i == 2) {
849 		padding[0] = (u8)(aad_len >> 8);
850 		padding[1] = (u8)aad_len;
851 	} else {
852 		padding[0] = 0xFF;
853 		padding[1] = 0xFE;
854 		padding[2] = (u8)(aad_len >> 24);
855 		padding[3] = (u8)(aad_len >> 16);
856 		padding[4] = (u8)(aad_len >> 8);
856 		padding[5] = (u8)aad_len;
857 	}
858 
859 	*padding_size = i;
860 }
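
/*
 * Worked example (illustrative): for aad_len == 0x20 the short form applies
 * and the AAD block is prefixed with the two-byte big-endian length
 * { 0x00, 0x20 }; only for aad_len >= 0xFF00 is the six-byte form
 * { 0xFF, 0xFE, len[31:24], len[23:16], len[15:8], len[7:0] } used,
 * matching the a-field length encoding of RFC 3610 (CCM).
 */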
861 
862 static int ccm_compose_aad_iv(u8 *aad_iv, u32 data_len, u32 aad_len, u32 tag_size)
863 {
864 	aad_iv[0] |= ((u8)(((tag_size - 2) / 2) & 7) << 3);
865 
866 	aad_iv[12] = (u8)(data_len >> 24);
867 	aad_iv[13] = (u8)(data_len >> 16);
868 	aad_iv[14] = (u8)(data_len >> 8);
869 	aad_iv[15] = (u8)data_len;
870 
871 	if (aad_len)
872 		aad_iv[0] |= 0x40;	/* set the AAD-present flag */
873 
874 	return 0;
875 }
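
/*
 * Illustrative numbers for ccm_compose_aad_iv(): with a 16-byte tag,
 * ((16 - 2) / 2) & 7 == 7, so B0[0] gains (7 << 3); bytes 12..15 then carry
 * the 32-bit big-endian payload length, and bit 0x40 marks that AAD
 * follows, per the B0 flags layout of RFC 3610.
 */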
876 
877 static int hw_cipher_init(u32 chn, const u8 *key, const u8 *twk_key,
878 			  u32 key_len, const u8 *iv, u32 iv_len,
879 			  u32 algo, u32 mode, bool enc)
880 {
881 	u32 rk_mode = RK_GET_RK_MODE(mode);
882 	u32 key_chn_sel = chn;
883 	u32 reg_ctrl = 0;
884 
885 	IMSG("%s: key addr is %p, key_len is %d, iv addr is %p",
886 	     __func__, key, key_len, iv);
887 	if (rk_mode >= RK_MODE_MAX)
888 		return -EINVAL;
889 
890 	switch (algo) {
891 	case CRYPTO_DES:
892 		if (key_len > DES_BLOCK_SIZE)
893 			reg_ctrl |= CRYPTO_BC_TDES;
894 		else
895 			reg_ctrl |= CRYPTO_BC_DES;
896 		break;
897 	case CRYPTO_AES:
898 		reg_ctrl |= CRYPTO_BC_AES;
899 		break;
900 	case CRYPTO_SM4:
901 		reg_ctrl |= CRYPTO_BC_SM4;
902 		break;
903 	default:
904 		return -EINVAL;
905 	}
906 
907 	if (algo == CRYPTO_AES || algo == CRYPTO_SM4) {
908 		switch (key_len) {
909 		case AES_KEYSIZE_128:
910 			reg_ctrl |= CRYPTO_BC_128_bit_key;
911 			break;
912 		case AES_KEYSIZE_192:
913 			reg_ctrl |= CRYPTO_BC_192_bit_key;
914 			break;
915 		case AES_KEYSIZE_256:
916 			reg_ctrl |= CRYPTO_BC_256_bit_key;
917 			break;
918 		default:
919 			return -EINVAL;
920 		}
921 	}
922 
923 	reg_ctrl |= rk_mode2bc_mode[rk_mode];
924 	if (!enc)
925 		reg_ctrl |= CRYPTO_BC_DECRYPT;
926 
927 	/* write key data to reg */
928 	write_key_reg(key_chn_sel, key, key_len);
929 
930 	/* write twk key for xts mode */
931 	if (rk_mode == RK_MODE_XTS)
932 		write_key_reg(key_chn_sel + 4, twk_key, key_len);
933 
934 	/* set iv reg */
935 	if (rk_mode == RK_MODE_CCM)
936 		ccm128_set_iv_reg(chn, iv, iv_len);
937 	else
938 		set_iv_reg(chn, iv, iv_len);
939 
940 	/* din_swap set 1, dout_swap set 1, default 1. */
941 	crypto_write(0x00030003, CRYPTO_FIFO_CTL);
942 	crypto_write(0, CRYPTO_DMA_INT_EN);
943 
944 	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL);
945 
946 	return 0;
947 }
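
/*
 * Example of the control word built above (illustrative): AES-128-CBC
 * encryption ends up writing
 *
 *	reg_ctrl = CRYPTO_BC_AES | CRYPTO_BC_128_bit_key | CRYPTO_BC_CBC;
 *
 * to CRYPTO_BC_CTL (with the write-enable mask bits set); decryption would
 * additionally OR in CRYPTO_BC_DECRYPT.
 */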
948 
949 static int hw_cipher_crypt(const u8 *in, u8 *out, u64 len,
950 			   const u8 *aad, u32 aad_len,
951 			   u8 *tag, u32 tag_len, u32 mode)
952 {
953 	struct crypto_lli_desc *data_desc = NULL, *aad_desc = NULL;
954 	u8 *dma_in = NULL, *dma_out = NULL, *aad_tmp = NULL;
955 	u32 rk_mode = RK_GET_RK_MODE(mode);
956 	u32 reg_ctrl = 0, tmp_len = 0;
957 	u32 expt_int = 0, mask = 0;
958 	u32 key_chn = g_key_chn;
959 	u32 tmp, dst_len = 0;
960 	int ret = -1;
961 
962 	if (rk_mode == RK_MODE_CTS && len <= AES_BLOCK_SIZE) {
963 		printf("CTS mode length %u must be > %u bytes\n",
963 		       (u32)len, (u32)AES_BLOCK_SIZE);
964 		return -EINVAL;
965 	}
966 
967 	tmp_len = (rk_mode == RK_MODE_CTR) ? ROUNDUP(len, AES_BLOCK_SIZE) : len;
968 
969 	data_desc = align_malloc(sizeof(*data_desc), LLI_ADDR_ALIGN_SIZE);
970 	if (!data_desc)
971 		goto exit;
972 
973 	if (IS_ALIGNED((ulong)in, DATA_ADDR_ALIGN_SIZE) && tmp_len == len)
974 		dma_in = (void *)in;
975 	else
976 		dma_in = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE);
977 	if (!dma_in)
978 		goto exit;
979 
980 	if (out) {
981 		if (IS_ALIGNED((ulong)out, DATA_ADDR_ALIGN_SIZE) &&
982 		    tmp_len == len)
983 			dma_out = out;
984 		else
985 			dma_out = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE);
986 		if (!dma_out)
987 			goto exit;
988 		dst_len = tmp_len;
989 	}
990 
991 	memset(data_desc, 0x00, sizeof(*data_desc));
992 	if (dma_in != in)
993 		memcpy(dma_in, in, len);
994 
995 	data_desc->src_addr    = (u32)virt_to_phys(dma_in);
996 	data_desc->src_len     = tmp_len;
997 	data_desc->dst_addr    = (u32)virt_to_phys(dma_out);
998 	data_desc->dst_len     = dst_len;
999 	data_desc->dma_ctrl    = LLI_DMA_CTRL_LAST;
1000 
1001 	if (IS_MAC_MODE(rk_mode)) {
1002 		expt_int = CRYPTO_LIST_DONE_INT_ST;
1003 		data_desc->dma_ctrl |= LLI_DMA_CTRL_LIST_DONE;
1004 	} else {
1005 		expt_int = CRYPTO_DST_ITEM_DONE_INT_ST;
1006 		data_desc->dma_ctrl |= LLI_DMA_CTRL_DST_DONE;
1007 	}
1008 
1009 	data_desc->user_define = LLI_USER_CIPHER_START |
1010 				 LLI_USER_STRING_START |
1011 				 LLI_USER_STRING_LAST |
1012 				 (key_chn << 4);
1013 	crypto_write((u32)virt_to_phys(data_desc), CRYPTO_DMA_LLI_ADDR);
1014 
1015 	if (rk_mode == RK_MODE_CCM || rk_mode == RK_MODE_GCM) {
1016 		u32 aad_tmp_len = 0;
1017 
1018 		aad_desc = align_malloc(sizeof(*aad_desc), LLI_ADDR_ALIGN_SIZE);
1019 		if (!aad_desc)
1020 			goto exit;
1021 
1022 		memset(aad_desc, 0x00, sizeof(*aad_desc));
1023 		aad_desc->next_addr = (u32)virt_to_phys(data_desc);
1024 		aad_desc->user_define = LLI_USER_CIPHER_START |
1025 					 LLI_USER_STRING_START |
1026 					 LLI_USER_STRING_LAST |
1027 					 LLI_USER_STRING_AAD |
1028 					 (key_chn << 4);
1029 
1030 		if (rk_mode == RK_MODE_CCM) {
1031 			u8 padding[AES_BLOCK_SIZE];
1032 			u32 padding_size = 0;
1033 
1034 			memset(padding, 0x00, sizeof(padding));
1035 			ccm_aad_padding(aad_len, padding, &padding_size);
1036 
1037 			aad_tmp_len = aad_len + AES_BLOCK_SIZE + padding_size;
1038 			aad_tmp_len = ROUNDUP(aad_tmp_len, AES_BLOCK_SIZE);
1039 			aad_tmp = align_malloc(aad_tmp_len,
1040 					       DATA_ADDR_ALIGN_SIZE);
1041 			if (!aad_tmp)
1042 				goto exit;
1043 
1044 			/* clear last block */
1045 			memset(aad_tmp + aad_tmp_len - AES_BLOCK_SIZE,
1046 			       0x00, AES_BLOCK_SIZE);
1047 
1048 			/* read iv data from reg */
1049 			get_iv_reg(key_chn, aad_tmp, AES_BLOCK_SIZE);
1050 			ccm_compose_aad_iv(aad_tmp, tmp_len, aad_len, tag_len);
1051 			memcpy(aad_tmp + AES_BLOCK_SIZE, padding, padding_size);
1052 
1053 			memcpy(aad_tmp + AES_BLOCK_SIZE + padding_size,
1054 			       aad, aad_len);
1055 		} else {
1056 			aad_tmp_len = aad_len;
1057 			if (IS_ALIGNED((ulong)aad, DATA_ADDR_ALIGN_SIZE)) {
1058 				aad_tmp = (void *)aad;
1059 			} else {
1060 				aad_tmp = align_malloc(aad_tmp_len,
1061 						       DATA_ADDR_ALIGN_SIZE);
1062 				if (!aad_tmp)
1063 					goto exit;
1064 
1065 				memcpy(aad_tmp, aad, aad_tmp_len);
1066 			}
1067 
1068 			set_aad_len_reg(key_chn, aad_tmp_len);
1069 			set_pc_len_reg(key_chn, tmp_len);
1070 		}
1071 
1072 		aad_desc->src_addr = (u32)virt_to_phys(aad_tmp);
1073 		aad_desc->src_len  = aad_tmp_len;
1074 
1075 		if (aad_tmp_len) {
1076 			data_desc->user_define = LLI_USER_STRING_START |
1077 						 LLI_USER_STRING_LAST |
1078 						 (key_chn << 4);
1079 			crypto_write((u32)virt_to_phys(aad_desc), CRYPTO_DMA_LLI_ADDR);
1080 			cache_op_inner(DCACHE_AREA_CLEAN, aad_tmp, aad_tmp_len);
1081 			cache_op_inner(DCACHE_AREA_CLEAN, aad_desc, sizeof(*aad_desc));
1082 		}
1083 	}
1084 
1085 	cache_op_inner(DCACHE_AREA_CLEAN, data_desc, sizeof(*data_desc));
1086 	cache_op_inner(DCACHE_AREA_CLEAN, dma_in, tmp_len);
1087 	if (dma_out)
1087 		cache_op_inner(DCACHE_AREA_INVALIDATE, dma_out, tmp_len);
1088 
1089 	/* din_swap set 1, dout_swap set 1, default 1. */
1090 	crypto_write(0x00030003, CRYPTO_FIFO_CTL);
1091 	crypto_write(0, CRYPTO_DMA_INT_EN);
1092 
1093 	reg_ctrl = crypto_read(CRYPTO_BC_CTL) | CRYPTO_BC_ENABLE;
1094 	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL);
1095 	crypto_write(0x00010001, CRYPTO_DMA_CTL);	/* start DMA */
1096 
1097 	mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST);
1098 
1099 	/* wait for the transfer to complete */
1100 	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask),
1101 			      RK_CRYPTO_TIMEOUT);
1102 	tmp = crypto_read(CRYPTO_DMA_INT_ST);
1103 	crypto_write(tmp, CRYPTO_DMA_INT_ST);
1104 
1105 	if ((tmp & mask) == expt_int) {
1106 		if (out && out != dma_out)
1107 			memcpy(out, dma_out, len);
1108 
1109 		if (IS_NEED_TAG(rk_mode)) {
1110 			ret = WAIT_TAG_VALID(key_chn, RK_CRYPTO_TIMEOUT);
1111 			get_tag_from_reg(key_chn, tag, AES_BLOCK_SIZE);
1112 		}
1113 	} else {
1114 		dump_crypto_state(data_desc, tmp, expt_int, in, out, len, ret);
1115 		ret = -1;
1116 	}
1117 
1118 exit:
1119 	crypto_write(0xffff0000, CRYPTO_BC_CTL);	/* disable block cipher */
1120 	align_free(data_desc);
1121 	align_free(aad_desc);
1122 	if (dma_in != in)
1123 		align_free(dma_in);
1124 	if (out && dma_out != out)
1125 		align_free(dma_out);
1126 	if (aad && aad != aad_tmp)
1127 		align_free(aad_tmp);
1128 
1129 	return ret;
1130 }
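
/*
 * For AEAD (CCM/GCM) the function above submits two chained descriptors
 * (sketch of what the code builds): the AAD item carries
 * LLI_USER_STRING_AAD and links to the payload item through next_addr,
 *
 *	aad_desc (AAD bytes) --next_addr--> data_desc (payload in/out)
 *
 * so the engine consumes both in one DMA pass; plain ciphers submit only
 * data_desc.
 */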
1131 
1132 static int hw_aes_init(u32 chn, const u8 *key, const u8 *twk_key, u32 key_len,
1133 		       const u8 *iv, u32 iv_len, u32 mode, bool enc)
1134 {
1135 	u32 rk_mode = RK_GET_RK_MODE(mode);
1136 
1137 	if (rk_mode > RK_MODE_XTS)
1138 		return -EINVAL;
1139 
1140 	if (iv_len > AES_BLOCK_SIZE)
1141 		return -EINVAL;
1142 
1143 	if (IS_NEED_IV(rk_mode)) {
1144 		if (!iv || iv_len != AES_BLOCK_SIZE)
1145 			return -EINVAL;
1146 	} else {
1147 		iv_len = 0;
1148 	}
1149 
1150 	if (rk_mode == RK_MODE_XTS) {
1151 		if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_256)
1152 			return -EINVAL;
1153 
1154 		if (!key || !twk_key)
1155 			return -EINVAL;
1156 	} else {
1157 		if (key_len != AES_KEYSIZE_128 &&
1158 		    key_len != AES_KEYSIZE_192 &&
1159 		    key_len != AES_KEYSIZE_256)
1160 			return -EINVAL;
1161 	}
1162 
1163 	return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len,
1164 			      CRYPTO_AES, mode, enc);
1165 }
1166 
1167 static int hw_sm4_init(u32  chn, const u8 *key, const u8 *twk_key, u32 key_len,
1168 		       const u8 *iv, u32 iv_len, u32 mode, bool enc)
1169 {
1170 	u32 rk_mode = RK_GET_RK_MODE(mode);
1171 
1172 	if (rk_mode > RK_MODE_XTS)
1173 		return -EINVAL;
1174 
1175 	if (iv_len > SM4_BLOCK_SIZE || key_len != SM4_KEYSIZE)
1176 		return -EINVAL;
1177 
1178 	if (IS_NEED_IV(rk_mode)) {
1179 		if (!iv || iv_len != SM4_BLOCK_SIZE)
1180 			return -EINVAL;
1181 	} else {
1182 		iv_len = 0;
1183 	}
1184 
1185 	if (rk_mode == RK_MODE_XTS) {
1186 		if (!key || !twk_key)
1187 			return -EINVAL;
1188 	}
1189 
1190 	return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len,
1191 			      CRYPTO_SM4, mode, enc);
1192 }
1193 
1194 int rk_crypto_des(struct udevice *dev, u32 mode, const u8 *key, u32 key_len,
1195 		  const u8 *iv, const u8 *in, u8 *out, u32 len, bool enc)
1196 {
1197 	u32 rk_mode = RK_GET_RK_MODE(mode);
1198 	u8 tmp_key[24];
1199 	int ret;
1200 
1201 	if (!is_des_mode(rk_mode))
1202 		return -EINVAL;
1203 
1204 	if (key_len == DES_BLOCK_SIZE || key_len == 3 * DES_BLOCK_SIZE) {
1205 		memcpy(tmp_key, key, key_len);
1206 	} else if (key_len == 2 * DES_BLOCK_SIZE) {
1207 		memcpy(tmp_key, key, 16);
1208 		memcpy(tmp_key + 16, key, 8);
1209 		key_len = 3 * DES_BLOCK_SIZE;
1210 	} else {
1211 		return -EINVAL;
1212 	}
1213 
1214 	ret = hw_cipher_init(0, tmp_key, NULL, key_len, iv, DES_BLOCK_SIZE,
1215 			     CRYPTO_DES, mode, enc);
1216 	if (ret)
1217 		goto exit;
1218 
1219 	ret = hw_cipher_crypt(in, out, len, NULL, 0,
1220 			      NULL, 0, mode);
1221 
1222 exit:
1223 	return ret;
1224 }
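
/*
 * Key-handling note (illustrative): a 16-byte 2-key TDES key K1 || K2 is
 * expanded above into the 24-byte 3-key form K1 || K2 || K1 before it is
 * handed to the engine, i.e.:
 *
 *	u8 k3[24];
 *
 *	memcpy(k3, key, 16);		// K1 || K2
 *	memcpy(k3 + 16, key, 8);	// ... || K1
 */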
1225 
1226 int rk_crypto_aes(struct udevice *dev, u32 mode,
1227 		  const u8 *key, const u8 *twk_key, u32 key_len,
1228 		  const u8 *iv, u32 iv_len,
1229 		  const u8 *in, u8 *out, u32 len, bool enc)
1230 {
1231 	int ret;
1232 
1233 	/* RV1126/RV1109 do not support aes-192 */
1234 #if defined(CONFIG_ROCKCHIP_RV1126)
1235 	if (key_len == AES_KEYSIZE_192)
1236 		return -EINVAL;
1237 #endif
1238 
1239 	ret = hw_aes_init(0, key, twk_key, key_len, iv, iv_len, mode, enc);
1240 	if (ret)
1241 		return ret;
1242 
1243 	return hw_cipher_crypt(in, out, len, NULL, 0,
1244 			       NULL, 0, mode);
1245 }
1246 
1247 int rk_crypto_sm4(struct udevice *dev, u32 mode,
1248 		  const u8 *key, const u8 *twk_key, u32 key_len,
1249 		  const u8 *iv, u32 iv_len,
1250 		  const u8 *in, u8 *out, u32 len, bool enc)
1251 {
1252 	int ret;
1253 
1254 	ret = hw_sm4_init(0, key, twk_key, key_len, iv, iv_len, mode, enc);
1255 	if (ret)
1256 		return ret;
1257 
1258 	return hw_cipher_crypt(in, out, len, NULL, 0, NULL, 0, mode);
1259 }
1260 
1261 int rockchip_crypto_cipher(struct udevice *dev, cipher_context *ctx,
1262 			   const u8 *in, u8 *out, u32 len, bool enc)
1263 {
1264 	int ret;
1265 
1266 	rk_crypto_enable_clk(dev);
1267 
1268 	switch (ctx->algo) {
1269 	case CRYPTO_DES:
1270 		ret = rk_crypto_des(dev, ctx->mode, ctx->key, ctx->key_len,
1271 				    ctx->iv, in, out, len, enc);
1272 		break;
1273 	case CRYPTO_AES:
1274 		ret = rk_crypto_aes(dev, ctx->mode,
1275 				    ctx->key, ctx->twk_key, ctx->key_len,
1276 				    ctx->iv, ctx->iv_len, in, out, len, enc);
1277 		break;
1278 	case CRYPTO_SM4:
1279 		ret = rk_crypto_sm4(dev, ctx->mode,
1280 				    ctx->key, ctx->twk_key, ctx->key_len,
1281 				    ctx->iv, ctx->iv_len, in, out, len, enc);
1282 		break;
1283 	default:
1284 		ret = -EINVAL;
1285 		break;
1286 	}
1287 
1288 	rk_crypto_disable_clk(dev);
1289 
1290 	return ret;
1291 }
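
/*
 * Call sketch (illustrative; cipher_context fields as consumed above, and
 * mode is assumed to be a value whose RK_GET_RK_MODE() field selects the
 * block mode):
 *
 *	cipher_context ctx;
 *
 *	ctx.algo    = CRYPTO_AES;
 *	ctx.mode    = mode;		// RK_GET_RK_MODE(mode) == RK_MODE_CBC
 *	ctx.key     = key;
 *	ctx.key_len = AES_KEYSIZE_128;
 *	ctx.iv      = iv;
 *	ctx.iv_len  = AES_BLOCK_SIZE;
 *	rockchip_crypto_cipher(dev, &ctx, in, out, len, true);
 */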
1292 
1293 int rk_crypto_mac(struct udevice *dev, u32 algo, u32 mode,
1294 		  const u8 *key, u32 key_len,
1295 		  const u8 *in, u32 len, u8 *tag)
1296 {
1297 	u32 rk_mode = RK_GET_RK_MODE(mode);
1298 	int ret;
1299 
1300 	if (!IS_MAC_MODE(rk_mode))
1301 		return -EINVAL;
1302 
1303 	if (algo != CRYPTO_AES && algo != CRYPTO_SM4)
1304 		return -EINVAL;
1305 
1306 	/* RV1126/RV1109 do not support aes-192 */
1307 #if defined(CONFIG_ROCKCHIP_RV1126)
1308 	if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192)
1309 		return -EINVAL;
1310 #endif
1311 
1312 	ret = hw_cipher_init(g_key_chn, key, NULL, key_len, NULL, 0,
1313 			     algo, mode, true);
1314 	if (ret)
1315 		return ret;
1316 
1317 	return hw_cipher_crypt(in, NULL, len, NULL, 0,
1318 			       tag, AES_BLOCK_SIZE, mode);
1319 }
1320 
1321 int rockchip_crypto_mac(struct udevice *dev, cipher_context *ctx,
1322 			const u8 *in, u32 len, u8 *tag)
1323 {
1324 	int ret = 0;
1325 
1326 	rk_crypto_enable_clk(dev);
1327 
1328 	ret = rk_crypto_mac(dev, ctx->algo, ctx->mode,
1329 			    ctx->key, ctx->key_len, in, len, tag);
1330 
1331 	rk_crypto_disable_clk(dev);
1332 
1333 	return ret;
1334 }
1335 
1336 int rk_crypto_ae(struct udevice *dev, u32 algo, u32 mode,
1337 		 const u8 *key, u32 key_len, const u8 *nonce, u32 nonce_len,
1338 		 const u8 *in, u32 len, const u8 *aad, u32 aad_len,
1339 		 u8 *out, u8 *tag)
1340 {
1341 	u32 rk_mode = RK_GET_RK_MODE(mode);
1342 	int ret;
1343 
1344 	if (!IS_AE_MODE(rk_mode))
1345 		return -EINVAL;
1346 
1347 	if (len == 0)
1348 		return -EINVAL;
1349 
1350 	if (algo != CRYPTO_AES && algo != CRYPTO_SM4)
1351 		return -EINVAL;
1352 
1353 	/* RV1126/RV1109 do not support aes-192 */
1354 #if defined(CONFIG_ROCKCHIP_RV1126)
1355 	if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192)
1356 		return -EINVAL;
1357 #endif
1358 
1359 	ret = hw_cipher_init(g_key_chn, key, NULL, key_len, nonce, nonce_len,
1360 			     algo, mode, true);
1361 	if (ret)
1362 		return ret;
1363 
1364 	return hw_cipher_crypt(in, out, len, aad, aad_len,
1365 			       tag, AES_BLOCK_SIZE, mode);
1366 }
1367 
1368 int rockchip_crypto_ae(struct udevice *dev, cipher_context *ctx,
1369 		       const u8 *in, u32 len, const u8 *aad, u32 aad_len,
1370 		       u8 *out, u8 *tag)
1371 
1372 {
1373 	int ret = 0;
1374 
1375 	rk_crypto_enable_clk(dev);
1376 
1377 	ret = rk_crypto_ae(dev, ctx->algo, ctx->mode, ctx->key, ctx->key_len,
1378 			   ctx->iv, ctx->iv_len, in, len,
1379 			   aad, aad_len, out, tag);
1380 
1381 	rk_crypto_disable_clk(dev);
1382 
1383 	return ret;
1384 }
1385 
1386 #endif
1387 
1388 #if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
1389 static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx,
1390 				      u8 *sign, u8 *output)
1391 {
1392 	struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL;
1393 	struct mpa_num *mpa_c = NULL, *mpa_result = NULL;
1394 	u32 n_bits, n_words;
1395 	int ret;
1396 
1397 	if (!ctx)
1398 		return -EINVAL;
1399 
1400 	if (ctx->algo != CRYPTO_RSA512 &&
1401 	    ctx->algo != CRYPTO_RSA1024 &&
1402 	    ctx->algo != CRYPTO_RSA2048 &&
1403 	    ctx->algo != CRYPTO_RSA3072 &&
1404 	    ctx->algo != CRYPTO_RSA4096)
1405 		return -EINVAL;
1406 
1407 	n_bits = crypto_algo_nbits(ctx->algo);
1408 	n_words = BITS2WORD(n_bits);
1409 
1410 	ret = rk_mpa_alloc(&mpa_m, sign, n_words);
1411 	if (ret)
1412 		goto exit;
1413 
1414 	ret = rk_mpa_alloc(&mpa_e, ctx->e, n_words);
1415 	if (ret)
1416 		goto exit;
1417 
1418 	ret = rk_mpa_alloc(&mpa_n, ctx->n, n_words);
1419 	if (ret)
1420 		goto exit;
1421 
1422 	if (ctx->c) {
1423 		ret = rk_mpa_alloc(&mpa_c, ctx->c, n_words);
1424 		if (ret)
1425 			goto exit;
1426 	}
1427 
1428 	ret = rk_mpa_alloc(&mpa_result, NULL, n_words);
1429 	if (ret)
1430 		goto exit;
1431 
1432 	rk_crypto_enable_clk(dev);
1433 	ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result);
1434 	if (!ret)
1435 		memcpy(output, mpa_result->d, BITS2BYTE(n_bits));
1436 	rk_crypto_disable_clk(dev);
1437 
1438 exit:
1439 	rk_mpa_free(&mpa_m);
1440 	rk_mpa_free(&mpa_e);
1441 	rk_mpa_free(&mpa_n);
1442 	rk_mpa_free(&mpa_c);
1443 	rk_mpa_free(&mpa_result);
1444 
1445 	return ret;
1446 }
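
/*
 * Usage sketch (illustrative; rsa_key fields as consumed above): the
 * "verify" here is the raw public-key operation sign^e mod n, so for
 * RSA-2048 the caller receives the 256-byte encoded message back and
 * compares it against the expected padding/digest itself:
 *
 *	rsa_key key;
 *	u8 em[256];
 *
 *	key.algo = CRYPTO_RSA2048;
 *	key.n = n_words;	// modulus, in the word layout rk_mpa_alloc() takes
 *	key.e = e_words;	// public exponent
 *	key.c = NULL;		// optional precomputed constant
 *	rockchip_crypto_rsa_verify(dev, &key, sign, em);
 */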
1447 #endif
1448 
1449 static const struct dm_crypto_ops rockchip_crypto_ops = {
1450 	.capability   = rockchip_crypto_capability,
1451 	.sha_init     = rockchip_crypto_sha_init,
1452 	.sha_update   = rockchip_crypto_sha_update,
1453 	.sha_final    = rockchip_crypto_sha_final,
1454 #if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
1455 	.rsa_verify   = rockchip_crypto_rsa_verify,
1456 #endif
1457 #if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
1458 	.hmac_init    = rockchip_crypto_hmac_init,
1459 	.hmac_update  = rockchip_crypto_hmac_update,
1460 	.hmac_final   = rockchip_crypto_hmac_final,
1461 #endif
1462 #if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
1463 	.cipher_crypt = rockchip_crypto_cipher,
1464 	.cipher_mac = rockchip_crypto_mac,
1465 	.cipher_ae  = rockchip_crypto_ae,
1466 #endif
1467 };
1468 
1469 /*
1470  * Only use "clocks" to parse the crypto clock ids via rockchip_get_clk(),
1471  * because we always add a crypto node in the U-Boot dts. When the kernel
1472  * dtb is enabled:
1473  *   1. there is a cru phandle mismatch between the U-Boot and kernel dtb;
1474  *   2. CONFIG_OF_SPL_REMOVE_PROPS removes the clock property.
1475  */
1476 static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev)
1477 {
1478 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
1479 	int len, ret = -EINVAL;
1480 
1481 	memset(priv, 0x00, sizeof(*priv));
1482 
1483 	priv->reg = (fdt_addr_t)dev_read_addr_ptr(dev);
1484 	if (priv->reg == FDT_ADDR_T_NONE)
1485 		return -EINVAL;
1486 
1487 	crypto_base = priv->reg;
1488 
1489 	/* if there is no "clocks" property in the dts, just skip it */
1490 	if (!dev_read_prop(dev, "clocks", &len)) {
1491 		printf("Can't find \"clocks\" property\n");
1492 		return 0;
1493 	}
1494 
1496 	priv->clocks = malloc(len);
1497 	if (!priv->clocks)
1498 		return -ENOMEM;
1499 
1500 	priv->nclocks = len / (2 * sizeof(u32));
1501 	if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks,
1502 			       priv->nclocks)) {
1503 		printf("Can't read \"clocks\" property\n");
1504 		ret = -EINVAL;
1505 		goto exit;
1506 	}
1507 
1508 	if (dev_read_prop(dev, "clock-frequency", &len)) {
1509 		priv->frequencies = malloc(len);
1510 		if (!priv->frequencies) {
1511 			ret = -ENOMEM;
1512 			goto exit;
1513 		}
1514 		priv->freq_nclocks = len / sizeof(u32);
1515 		if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies,
1516 				       priv->freq_nclocks)) {
1517 			printf("Can't read \"clock-frequency\" property\n");
1518 			ret = -EINVAL;
1519 			goto exit;
1520 		}
1521 	}
1522 
1523 	return 0;
1524 exit:
1525 	if (priv->clocks)
1526 		free(priv->clocks);
1527 
1528 	if (priv->frequencies)
1529 		free(priv->frequencies);
1530 
1531 	return ret;
1532 }
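
/*
 * A matching dts fragment would look like this (illustrative; unit address,
 * cru phandle and clock ids are made-up examples):
 *
 *	crypto: crypto@ff2b0000 {
 *		compatible = "rockchip,rv1126-crypto";
 *		reg = <0xff2b0000 0x4000>;
 *		clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>;
 *		clock-frequency = <200000000 200000000>;
 *	};
 */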
1533 
1534 static int rk_crypto_set_clk(struct udevice *dev)
1535 {
1536 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
1537 	struct clk clk;
1538 	int i, ret;
1539 
1540 	/* use standard "assigned-clock-rates" props */
1541 	if (dev_read_size(dev, "assigned-clock-rates") > 0)
1542 		return clk_set_defaults(dev);
1543 
1544 	/* use "clock-frequency" props */
1545 	if (priv->freq_nclocks == 0)
1546 		return 0;
1547 
1548 	for (i = 0; i < priv->freq_nclocks; i++) {
1549 		ret = clk_get_by_index(dev, i, &clk);
1550 		if (ret < 0) {
1551 			printf("Failed to get clk index %d, ret=%d\n", i, ret);
1552 			return ret;
1553 		}
1554 		ret = clk_set_rate(&clk, priv->frequencies[i]);
1555 		if (ret < 0) {
1556 			printf("%s: Failed to set clk(%ld): ret=%d\n",
1557 			       __func__, clk.id, ret);
1558 			return ret;
1559 		}
1560 	}
1561 
1562 	return 0;
1563 }
1564 
1565 static int rockchip_crypto_probe(struct udevice *dev)
1566 {
1567 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
1568 	struct rk_crypto_soc_data *sdata;
1569 	int ret = 0;
1570 
1571 	sdata = (struct rk_crypto_soc_data *)dev_get_driver_data(dev);
1572 
1573 	if (sdata->dynamic_cap)
1574 		sdata->capability = sdata->dynamic_cap();
1575 
1576 	priv->soc_data = sdata;
1577 
1578 	priv->hw_ctx = memalign(LLI_ADDR_ALIGN_SIZE,
1579 				sizeof(struct rk_hash_ctx));
1580 	if (!priv->hw_ctx)
1581 		return -ENOMEM;
1582 
1583 	ret = rk_crypto_set_clk(dev);
1584 	if (ret)
1585 		return ret;
1586 
1587 	rk_crypto_enable_clk(dev);
1588 
1589 	hw_crypto_reset();
1590 
1591 	rk_crypto_disable_clk(dev);
1592 
1593 	return 0;
1594 }
1595 
1596 static const struct rk_crypto_soc_data soc_data_base = {
1597 	.capability = CRYPTO_MD5 |
1598 		      CRYPTO_SHA1 |
1599 		      CRYPTO_SHA256 |
1600 		      CRYPTO_SHA512 |
1601 		      CRYPTO_HMAC_MD5 |
1602 		      CRYPTO_HMAC_SHA1 |
1603 		      CRYPTO_HMAC_SHA256 |
1604 		      CRYPTO_HMAC_SHA512 |
1605 		      CRYPTO_RSA512 |
1606 		      CRYPTO_RSA1024 |
1607 		      CRYPTO_RSA2048 |
1608 		      CRYPTO_RSA3072 |
1609 		      CRYPTO_RSA4096 |
1610 		      CRYPTO_DES |
1611 		      CRYPTO_AES,
1612 };
1613 
1614 static const struct rk_crypto_soc_data soc_data_base_sm = {
1615 	.capability = CRYPTO_MD5 |
1616 		      CRYPTO_SHA1 |
1617 		      CRYPTO_SHA256 |
1618 		      CRYPTO_SHA512 |
1619 		      CRYPTO_SM3 |
1620 		      CRYPTO_HMAC_MD5 |
1621 		      CRYPTO_HMAC_SHA1 |
1622 		      CRYPTO_HMAC_SHA256 |
1623 		      CRYPTO_HMAC_SHA512 |
1624 		      CRYPTO_HMAC_SM3 |
1625 		      CRYPTO_RSA512 |
1626 		      CRYPTO_RSA1024 |
1627 		      CRYPTO_RSA2048 |
1628 		      CRYPTO_RSA3072 |
1629 		      CRYPTO_RSA4096 |
1630 		      CRYPTO_DES |
1631 		      CRYPTO_AES |
1632 		      CRYPTO_SM4,
1633 };
1634 
1635 static const struct rk_crypto_soc_data soc_data_rk1808 = {
1636 	.capability = CRYPTO_MD5 |
1637 		      CRYPTO_SHA1 |
1638 		      CRYPTO_SHA256 |
1639 		      CRYPTO_HMAC_MD5 |
1640 		      CRYPTO_HMAC_SHA1 |
1641 		      CRYPTO_HMAC_SHA256 |
1642 		      CRYPTO_RSA512 |
1643 		      CRYPTO_RSA1024 |
1644 		      CRYPTO_RSA2048 |
1645 		      CRYPTO_RSA3072 |
1646 		      CRYPTO_RSA4096,
1647 };
1648 
1649 static const struct rk_crypto_soc_data soc_data_cryptov3 = {
1650 	.capability  = 0,
1651 	.dynamic_cap = crypto_v3_dynamic_cap,
1652 };
1653 
1654 static const struct udevice_id rockchip_crypto_ids[] = {
1655 	{
1656 		.compatible = "rockchip,px30-crypto",
1657 		.data = (ulong)&soc_data_base
1658 	},
1659 	{
1660 		.compatible = "rockchip,rk1808-crypto",
1661 		.data = (ulong)&soc_data_rk1808
1662 	},
1663 	{
1664 		.compatible = "rockchip,rk3308-crypto",
1665 		.data = (ulong)&soc_data_base
1666 	},
1667 	{
1668 		.compatible = "rockchip,rv1126-crypto",
1669 		.data = (ulong)&soc_data_base_sm
1670 	},
1671 	{
1672 		.compatible = "rockchip,rk3568-crypto",
1673 		.data = (ulong)&soc_data_base_sm
1674 	},
1675 	{
1676 		.compatible = "rockchip,rk3588-crypto",
1677 		.data = (ulong)&soc_data_base_sm
1678 	},
1679 	{
1680 		.compatible = "rockchip,crypto-v3",
1681 		.data = (ulong)&soc_data_cryptov3
1682 	},
1683 	{
1684 		.compatible = "rockchip,crypto-v4",
1685 		.data = (ulong)&soc_data_cryptov3 /* reuse crypto v3 config */
1686 	},
1687 	{ }
1688 };
1689 
1690 U_BOOT_DRIVER(rockchip_crypto_v2) = {
1691 	.name		= "rockchip_crypto_v2",
1692 	.id		= UCLASS_CRYPTO,
1693 	.of_match	= rockchip_crypto_ids,
1694 	.ops		= &rockchip_crypto_ops,
1695 	.probe		= rockchip_crypto_probe,
1696 	.ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata,
1697 	.priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv),
1698 };
1699