xref: /rk3399_rockchip-uboot/drivers/crypto/rockchip/crypto_v2.c (revision 2e5fd4b9dbacfce964e54d14c9c9035c9bd2f614)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4  */
5 
6 #include <common.h>
7 #include <clk.h>
8 #include <crypto.h>
9 #include <dm.h>
10 #include <asm/io.h>
11 #include <asm/arch/hardware.h>
12 #include <asm/arch/clock.h>
13 #include <rockchip/crypto_hash_cache.h>
14 #include <rockchip/crypto_v2.h>
15 #include <rockchip/crypto_v2_pka.h>
16 
/* Magic stored in rk_hash_ctx to validate a context before it is used */
#define	RK_HASH_CTX_MAGIC		0x1A1A1A1A

#ifdef DEBUG
/* Debug trace helper: prefixes messages with function name and line */
#define IMSG(format, ...) printf("[%s, %05d]-trace: " format "\n", \
				 __func__, __LINE__, ##__VA_ARGS__)
#else
#define IMSG(format, ...)
#endif
25 
/*
 * DMA link-list-item (LLI) descriptor consumed by the crypto DMA engine.
 * Field order and widths are fixed by hardware; addresses are 32-bit
 * physical addresses (see virt_to_phys() below).
 */
struct crypto_lli_desc {
	u32 src_addr;		/* physical address of source data */
	u32 src_len;		/* source length in bytes */
	u32 dst_addr;		/* physical address of destination buffer */
	u32 dst_len;		/* destination length in bytes */
	u32 user_define;	/* LLI_USER_* flags (string start/last, key chn) */
	u32 reserve;
	u32 dma_ctrl;		/* LLI_DMA_CTRL_* flags (last/pause/int select) */
	u32 next_addr;		/* physical address of next descriptor in chain */
};
36 
/* Software-side hash session state, pointed to by rockchip_crypto_priv */
struct rk_hash_ctx {
	struct crypto_lli_desc		data_lli;	/* lli desc */
	struct crypto_hash_cache	*hash_cache;	/* alignment/cache helper */
	u32				magic;		/* to check ctx */
	u32				algo;		/* hash algo */
	u8				digest_size;	/* hash out length */
	u8				reserved[3];
};
45 
/* Per-SoC capability description */
struct rk_crypto_soc_data {
	u32 capability;			/* static CRYPTO_* capability mask */
	u32 (*dynamic_cap)(void);	/* optional runtime capability probe */
};
50 
/* Driver private data (fields below `length` are presumably filled from
 * the device tree in probe code outside this chunk — verify there).
 */
struct rockchip_crypto_priv {
	fdt_addr_t			reg;		/* register base address */
	u32				frequency;
	char				*clocks;
	u32				*frequencies;
	u32				nclocks;
	u32				freq_nclocks;
	u32				length;		/* bytes hashed so far */
	struct rk_hash_ctx		*hw_ctx;	/* active hash context */
	struct rk_crypto_soc_data	*soc_data;
};
62 
/* alignment requirements of the DMA engine for descriptors and data */
#define LLI_ADDR_ALIGN_SIZE	8
#define DATA_ADDR_ALIGN_SIZE	8
#define DATA_LEN_ALIGN_SIZE	64

/* crypto timeout 500ms, must support more than 32M data per times*/
#define HASH_UPDATE_LIMIT	(32 * 1024 * 1024)
#define RK_CRYPTO_TIMEOUT	500000
70 
/*
 * Busy-poll until @condition becomes false or @timeout iterations
 * (1 us delay each) elapse.  Evaluates to 0 on success, -ETIMEDOUT on
 * expiry.
 */
#define RK_POLL_TIMEOUT(condition, timeout) \
({ \
	int time_out = timeout; \
	while (condition) { \
		if (--time_out <= 0) { \
			debug("[%s] %d: time out!\n", __func__,\
				__LINE__); \
			break; \
		} \
		udelay(1); \
	} \
	(time_out <= 0) ? -ETIMEDOUT : 0; \
})

/*
 * Wait for the tag-valid bit of @channel and acknowledge it by writing
 * the bit back.  Evaluates to 0 on success, -ETIMEDOUT on expiry.
 */
#define WAIT_TAG_VALID(channel, timeout) ({ \
	u32 tag_mask = CRYPTO_CH0_TAG_VALID << (channel);\
	int ret;\
	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_TAG_VALID) & tag_mask),\
			      timeout);\
	crypto_write(crypto_read(CRYPTO_TAG_VALID) & tag_mask, CRYPTO_TAG_VALID);\
	ret;\
})
93 
/*
 * This driver runs with a flat 1:1 address mapping, so translation is
 * just a truncation to the 32 bits the DMA engine can address.
 */
#define virt_to_phys(addr)		(((unsigned long)addr) & 0xffffffff)
#define phys_to_virt(addr, area)	((unsigned long)addr)

/* thin wrappers matching the naming used by shared crypto code */
#define align_malloc(bytes, alignment)	memalign(alignment, bytes)
#define align_free(addr)		do {if (addr) free(addr);} while (0)

#define ROUNDUP(size, alignment)	round_up(size, alignment)
#define cache_op_inner(type, addr, size) \
					crypto_flush_cacheline((ulong)addr, size)

/* modes that require an IV (everything except ECB and the MAC modes) */
#define IS_NEED_IV(rk_mode) ((rk_mode) != RK_MODE_ECB && \
			     (rk_mode) != RK_MODE_CMAC && \
			     (rk_mode) != RK_MODE_CBC_MAC)

/* modes that produce an authentication tag */
#define IS_NEED_TAG(rk_mode) ((rk_mode) == RK_MODE_CMAC || \
			      (rk_mode) == RK_MODE_CBC_MAC || \
			      (rk_mode) == RK_MODE_CCM || \
			      (rk_mode) == RK_MODE_GCM)

/* pure MAC modes */
#define IS_MAC_MODE(rk_mode) ((rk_mode) == RK_MODE_CMAC || \
			      (rk_mode) == RK_MODE_CBC_MAC)

/* authenticated-encryption modes */
#define IS_AE_MODE(rk_mode) ((rk_mode) == RK_MODE_CCM || \
			     (rk_mode) == RK_MODE_GCM)
118 
119 fdt_addr_t crypto_base;
120 
121 static inline void word2byte_be(u32 word, u8 *ch)
122 {
123 	ch[0] = (word >> 24) & 0xff;
124 	ch[1] = (word >> 16) & 0xff;
125 	ch[2] = (word >> 8) & 0xff;
126 	ch[3] = (word >> 0) & 0xff;
127 }
128 
129 static inline u32 byte2word_be(const u8 *ch)
130 {
131 	return (*ch << 24) + (*(ch + 1) << 16) + (*(ch + 2) << 8) + *(ch + 3);
132 }
133 
134 static inline void clear_regs(u32 base, u32 words)
135 {
136 	int i;
137 
138 	/*clear out register*/
139 	for (i = 0; i < words; i++)
140 		crypto_write(0, base + 4 * i);
141 }
142 
/* Clear the 16 hash output registers (enough for a 64-byte digest). */
static inline void clear_hash_out_reg(void)
{
	clear_regs(CRYPTO_HASH_DOUT_0, 16);
}
147 
/* Clear the key registers of every key channel (4 words per channel). */
static inline void clear_key_regs(void)
{
	clear_regs(CRYPTO_CH0_KEY_0, CRYPTO_KEY_CHANNEL_NUM * 4);
}
152 
153 static inline void read_regs(u32 base, u8 *data, u32 data_len)
154 {
155 	u8 tmp_buf[4];
156 	u32 i;
157 
158 	for (i = 0; i < data_len / 4; i++)
159 		word2byte_be(crypto_read(base + i * 4),
160 			     data + i * 4);
161 
162 	if (data_len % 4) {
163 		word2byte_be(crypto_read(base + i * 4), tmp_buf);
164 		memcpy(data + i * 4, tmp_buf, data_len % 4);
165 	}
166 }
167 
/*
 * Write @data_len bytes into consecutive 32-bit registers at @base,
 * packing bytes big-endian.  A trailing partial word is zero-padded.
 * Note @base is advanced inside the loop, so the tail write lands on
 * the register after the last full word.
 */
static inline void write_regs(u32 base, const u8 *data, u32 data_len)
{
	u8 tmp_buf[4];
	u32 i;

	for (i = 0; i < data_len / 4; i++, base += 4)
		crypto_write(byte2word_be(data + i * 4), base);

	if (data_len % 4) {
		memset(tmp_buf, 0x00, sizeof(tmp_buf));
		memcpy((u8 *)tmp_buf, data + i * 4, data_len % 4);
		crypto_write(byte2word_be(tmp_buf), base);
	}
}
182 
/* Load @key_len key bytes into key channel @chn (0x10 register stride). */
static inline void write_key_reg(u32 chn, const u8 *key, u32 key_len)
{
	write_regs(CRYPTO_CH0_KEY_0 + chn * 0x10, key, key_len);
}
187 
/*
 * Program the IV registers of channel @chn.  The IV registers are
 * always cleared first; a NULL/zero-length @iv therefore just clears
 * them and leaves the IV length register untouched.
 */
static inline void set_iv_reg(u32 chn, const u8 *iv, u32 iv_len)
{
	u32 base_iv;

	base_iv = CRYPTO_CH0_IV_0 + chn * 0x10;

	/* clear iv */
	clear_regs(base_iv, 4);

	if (!iv || iv_len == 0)
		return;

	write_regs(base_iv, iv, iv_len);

	crypto_write(iv_len, CRYPTO_CH0_IV_LEN_0 + 4 * chn);
}
204 
/* Read back @iv_len bytes of the current IV of channel @chn. */
static inline void get_iv_reg(u32 chn, u8 *iv, u32 iv_len)
{
	u32 base_iv;

	base_iv = CRYPTO_CH0_IV_0 + chn * 0x10;

	read_regs(base_iv, iv, iv_len);
}
213 
/*
 * Copy the authentication tag of channel @chn into @tag.
 * Only whole 32-bit words are copied (tag_len / 4 words); a @tag_len
 * that is not a multiple of 4 loses its tail bytes.
 */
static inline void get_tag_from_reg(u32 chn, u8 *tag, u32 tag_len)
{
	u32 i;
	u32 chn_base = CRYPTO_CH0_TAG_0 + 0x10 * chn;

	for (i = 0; i < tag_len / 4; i++, chn_base += 4)
		word2byte_be(crypto_read(chn_base), tag + 4 * i);
}
222 
223 static u32 crypto_v3_dynamic_cap(void)
224 {
225 	u32 capability = 0;
226 	u32 ver_reg, i;
227 	struct cap_map {
228 		u32 ver_offset;
229 		u32 mask;
230 		u32 cap_bit;
231 	};
232 	const struct cap_map cap_tbl[] = {
233 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_MD5_FLAG,    CRYPTO_MD5},
234 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA1_FLAG,   CRYPTO_SHA1},
235 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA256_FLAG, CRYPTO_SHA256},
236 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SHA512_FLAG, CRYPTO_SHA512},
237 	{CRYPTO_HASH_VERSION, CRYPTO_HASH_SM3_FLAG,    CRYPTO_SM3},
238 
239 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_MD5_FLAG,    CRYPTO_HMAC_MD5},
240 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA1_FLAG,   CRYPTO_HMAC_SHA1},
241 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA256_FLAG, CRYPTO_HMAC_SHA256},
242 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SHA512_FLAG, CRYPTO_HMAC_SHA512},
243 	{CRYPTO_HMAC_VERSION, CRYPTO_HMAC_SM3_FLAG,    CRYPTO_HMAC_SM3},
244 
245 	{CRYPTO_AES_VERSION,  CRYPTO_AES256_FLAG,      CRYPTO_AES},
246 	{CRYPTO_DES_VERSION,  CRYPTO_TDES_FLAG,        CRYPTO_DES},
247 	{CRYPTO_SM4_VERSION,  CRYPTO_ECB_FLAG,         CRYPTO_SM4},
248 	};
249 
250 	/* rsa */
251 	capability = CRYPTO_RSA512 |
252 		     CRYPTO_RSA1024 |
253 		     CRYPTO_RSA2048 |
254 		     CRYPTO_RSA3072 |
255 		     CRYPTO_RSA4096;
256 
257 	for (i = 0; i < ARRAY_SIZE(cap_tbl); i++) {
258 		ver_reg = crypto_read(cap_tbl[i].ver_offset);
259 
260 		if ((ver_reg & cap_tbl[i].mask) == cap_tbl[i].mask)
261 			capability |= cap_tbl[i].cap_bit;
262 	}
263 
264 	return capability;
265 }
266 
267 static int hw_crypto_reset(void)
268 {
269 	u32 val = 0, mask = 0;
270 	int ret;
271 
272 	val = CRYPTO_SW_PKA_RESET | CRYPTO_SW_CC_RESET;
273 	mask = val << CRYPTO_WRITE_MASK_SHIFT;
274 
275 	/* reset pka and crypto modules*/
276 	crypto_write(val | mask, CRYPTO_RST_CTL);
277 
278 	/* wait reset compelete */
279 	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL), RK_CRYPTO_TIMEOUT);
280 
281 	return ret;
282 }
283 
/*
 * Disable the hash engine, release the context's data cache and wipe
 * the context (clearing its magic).  @ctx must be a valid, initialized
 * context: the asserts fire on NULL or a bad magic.
 */
static void hw_hash_clean_ctx(struct rk_hash_ctx *ctx)
{
	/* clear hash status */
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

	assert(ctx);
	assert(ctx->magic == RK_HASH_CTX_MAGIC);

	crypto_hash_cache_free(ctx->hash_cache);

	memset(ctx, 0x00, sizeof(*ctx));
}
296 
297 static int rk_hash_init(void *hw_ctx, u32 algo)
298 {
299 	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)hw_ctx;
300 	u32 reg_ctrl = 0;
301 	int ret;
302 
303 	if (!tmp_ctx)
304 		return -EINVAL;
305 
306 	reg_ctrl = CRYPTO_SW_CC_RESET;
307 	crypto_write(reg_ctrl | (reg_ctrl << CRYPTO_WRITE_MASK_SHIFT),
308 		     CRYPTO_RST_CTL);
309 
310 	/* wait reset compelete */
311 	ret = RK_POLL_TIMEOUT(crypto_read(CRYPTO_RST_CTL),
312 			      RK_CRYPTO_TIMEOUT);
313 
314 	reg_ctrl = 0;
315 	tmp_ctx->algo = algo;
316 	switch (algo) {
317 	case CRYPTO_MD5:
318 	case CRYPTO_HMAC_MD5:
319 		reg_ctrl |= CRYPTO_MODE_MD5;
320 		tmp_ctx->digest_size = 16;
321 		break;
322 	case CRYPTO_SHA1:
323 	case CRYPTO_HMAC_SHA1:
324 		reg_ctrl |= CRYPTO_MODE_SHA1;
325 		tmp_ctx->digest_size = 20;
326 		break;
327 	case CRYPTO_SHA256:
328 	case CRYPTO_HMAC_SHA256:
329 		reg_ctrl |= CRYPTO_MODE_SHA256;
330 		tmp_ctx->digest_size = 32;
331 		break;
332 	case CRYPTO_SHA512:
333 	case CRYPTO_HMAC_SHA512:
334 		reg_ctrl |= CRYPTO_MODE_SHA512;
335 		tmp_ctx->digest_size = 64;
336 		break;
337 	case CRYPTO_SM3:
338 	case CRYPTO_HMAC_SM3:
339 		reg_ctrl |= CRYPTO_MODE_SM3;
340 		tmp_ctx->digest_size = 32;
341 		break;
342 	default:
343 		ret = -EINVAL;
344 		goto exit;
345 	}
346 
347 	clear_hash_out_reg();
348 
349 	/* enable hardware padding */
350 	reg_ctrl |= CRYPTO_HW_PAD_ENABLE;
351 	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL);
352 
353 	/* FIFO input and output data byte swap */
354 	/* such as B0, B1, B2, B3 -> B3, B2, B1, B0 */
355 	reg_ctrl = CRYPTO_DOUT_BYTESWAP | CRYPTO_DOIN_BYTESWAP;
356 	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_FIFO_CTL);
357 
358 	/* enable src_item_done interrupt */
359 	crypto_write(0, CRYPTO_DMA_INT_EN);
360 
361 	tmp_ctx->magic = RK_HASH_CTX_MAGIC;
362 
363 	return 0;
364 exit:
365 	/* clear hash setting if init failed */
366 	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);
367 
368 	return ret;
369 }
370 
/*
 * Push one aligned chunk of data through the hash engine via DMA.
 *
 * @hw_data:      driver priv (passed back from crypto_hash_cache)
 * @data:         chunk start, must be DATA_ADDR_ALIGN_SIZE aligned
 * @data_len:     chunk length; must be DATA_LEN_ALIGN_SIZE aligned
 *                unless @is_last
 * @started_flag: in/out; 0 on the first chunk, set to 1 once the DMA
 *                has been started so later chunks use RESTART
 * @is_last:      non-zero for the final chunk (enables HW padding path)
 *
 * Returns 0 on success; -ETIMEDOUT if the DMA poll expires, -EFAULT on
 * an unexpected interrupt status.  On success priv->length is advanced
 * so the final total can be checked against the announced length.
 */
static int rk_hash_direct_calc(void *hw_data, const u8 *data,
			       u32 data_len, u8 *started_flag, u8 is_last)
{
	struct rockchip_crypto_priv *priv = hw_data;
	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;
	struct crypto_lli_desc *lli = &hash_ctx->data_lli;
	int ret = -EINVAL;
	u32 tmp = 0, mask = 0;

	assert(IS_ALIGNED((ulong)data, DATA_ADDR_ALIGN_SIZE));
	assert(is_last || IS_ALIGNED(data_len, DATA_LEN_ALIGN_SIZE));

	debug("%s: data = %p, len = %u, s = %x, l = %x\n",
	      __func__, data, data_len, *started_flag, is_last);

	/* build a single-entry LLI for this chunk */
	memset(lli, 0x00, sizeof(*lli));
	lli->src_addr = (u32)virt_to_phys(data);
	lli->src_len = data_len;
	lli->dma_ctrl = LLI_DMA_CTRL_SRC_DONE;

	if (is_last) {
		lli->user_define |= LLI_USER_STRING_LAST;
		lli->dma_ctrl |= LLI_DMA_CTRL_LAST;
	} else {
		/* point at itself and pause: next chunk reuses this LLI */
		lli->next_addr = (u32)virt_to_phys(lli);
		lli->dma_ctrl |= LLI_DMA_CTRL_PAUSE;
	}

	if (!(*started_flag)) {
		lli->user_define |=
			(LLI_USER_STRING_START | LLI_USER_CIPHER_START);
		crypto_write((u32)virt_to_phys(lli), CRYPTO_DMA_LLI_ADDR);
		crypto_write((CRYPTO_HASH_ENABLE << CRYPTO_WRITE_MASK_SHIFT) |
			     CRYPTO_HASH_ENABLE, CRYPTO_HASH_CTL);
		tmp = CRYPTO_DMA_START;
		*started_flag = 1;
	} else {
		tmp = CRYPTO_DMA_RESTART;
	}

	/* flush cache */
	crypto_flush_cacheline((ulong)lli, sizeof(*lli));
	crypto_flush_cacheline((ulong)data, data_len);

	/* start calculate */
	crypto_write(tmp << CRYPTO_WRITE_MASK_SHIFT | tmp,
		     CRYPTO_DMA_CTL);

	/* mask CRYPTO_SYNC_LOCKSTEP_INT_ST flag */
	mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST);

	/* wait calc ok */
	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask),
			      RK_CRYPTO_TIMEOUT);

	/* clear interrupt status */
	tmp = crypto_read(CRYPTO_DMA_INT_ST);
	crypto_write(tmp, CRYPTO_DMA_INT_ST);

	/* anything but src-done/zero-len means the transfer went wrong */
	if ((tmp & mask) != CRYPTO_SRC_ITEM_DONE_INT_ST &&
	    (tmp & mask) != CRYPTO_ZERO_LEN_INT_ST) {
		ret = -EFAULT;
		debug("[%s] %d: CRYPTO_DMA_INT_ST = 0x%x\n",
		      __func__, __LINE__, tmp);
		goto exit;
	}

	priv->length += data_len;
exit:
	return ret;
}
442 
443 int rk_hash_update(void *ctx, const u8 *data, u32 data_len)
444 {
445 	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
446 	int ret = -EINVAL;
447 
448 	debug("\n");
449 	if (!tmp_ctx || !data)
450 		goto exit;
451 
452 	if (tmp_ctx->digest_size == 0 || tmp_ctx->magic != RK_HASH_CTX_MAGIC)
453 		goto exit;
454 
455 	ret = crypto_hash_update_with_cache(tmp_ctx->hash_cache,
456 					    data, data_len);
457 
458 exit:
459 	/* free lli list */
460 	if (ret)
461 		hw_hash_clean_ctx(tmp_ctx);
462 
463 	return ret;
464 }
465 
466 int rk_hash_final(void *ctx, u8 *digest, size_t len)
467 {
468 	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
469 	int ret = -EINVAL;
470 
471 	if (!digest)
472 		goto exit;
473 
474 	if (!tmp_ctx ||
475 	    tmp_ctx->digest_size == 0 ||
476 	    len > tmp_ctx->digest_size ||
477 	    tmp_ctx->magic != RK_HASH_CTX_MAGIC) {
478 		goto exit;
479 	}
480 
481 	/* wait hash value ok */
482 	ret = RK_POLL_TIMEOUT(!crypto_read(CRYPTO_HASH_VALID),
483 			      RK_CRYPTO_TIMEOUT);
484 
485 	read_regs(CRYPTO_HASH_DOUT_0, digest, len);
486 
487 	/* clear hash status */
488 	crypto_write(CRYPTO_HASH_IS_VALID, CRYPTO_HASH_VALID);
489 	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);
490 
491 exit:
492 
493 	return ret;
494 }
495 
496 static u32 rockchip_crypto_capability(struct udevice *dev)
497 {
498 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
499 	u32 capability, mask = 0;
500 
501 	capability = priv->soc_data->capability;
502 
503 #if !(CONFIG_IS_ENABLED(ROCKCHIP_CIPHER))
504 	mask |= (CRYPTO_DES | CRYPTO_AES | CRYPTO_SM4);
505 #endif
506 
507 #if !(CONFIG_IS_ENABLED(ROCKCHIP_HMAC))
508 	mask |= (CRYPTO_HMAC_MD5 | CRYPTO_HMAC_SHA1 | CRYPTO_HMAC_SHA256 |
509 			 CRYPTO_HMAC_SHA512 | CRYPTO_HMAC_SM3);
510 #endif
511 
512 #if !(CONFIG_IS_ENABLED(ROCKCHIP_RSA))
513 	mask |= (CRYPTO_RSA512 | CRYPTO_RSA1024 | CRYPTO_RSA2048 |
514 			 CRYPTO_RSA3072 | CRYPTO_RSA4096);
515 #endif
516 
517 	return capability & (~mask);
518 }
519 
520 static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
521 {
522 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
523 	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;
524 
525 	if (!ctx)
526 		return -EINVAL;
527 
528 	memset(hash_ctx, 0x00, sizeof(*hash_ctx));
529 
530 	priv->length = 0;
531 
532 	hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
533 						       priv, ctx->length,
534 						       DATA_ADDR_ALIGN_SIZE,
535 						       DATA_LEN_ALIGN_SIZE);
536 	if (!hash_ctx->hash_cache)
537 		return -EFAULT;
538 
539 	return rk_hash_init(hash_ctx, ctx->algo);
540 }
541 
542 static int rockchip_crypto_sha_update(struct udevice *dev,
543 				      u32 *input, u32 len)
544 {
545 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
546 	int ret, i;
547 	u8 *p;
548 
549 	if (!len)
550 		return -EINVAL;
551 
552 	p = (u8 *)input;
553 
554 	for (i = 0; i < len / HASH_UPDATE_LIMIT; i++, p += HASH_UPDATE_LIMIT) {
555 		ret = rk_hash_update(priv->hw_ctx, p, HASH_UPDATE_LIMIT);
556 		if (ret)
557 			goto exit;
558 	}
559 
560 	if (len % HASH_UPDATE_LIMIT)
561 		ret = rk_hash_update(priv->hw_ctx, p, len % HASH_UPDATE_LIMIT);
562 
563 exit:
564 	return ret;
565 }
566 
567 static int rockchip_crypto_sha_final(struct udevice *dev,
568 				     sha_context *ctx, u8 *output)
569 {
570 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
571 	u32 nbits;
572 	int ret;
573 
574 	nbits = crypto_algo_nbits(ctx->algo);
575 
576 	if (priv->length != ctx->length) {
577 		printf("total length(0x%08x) != init length(0x%08x)!\n",
578 		       priv->length, ctx->length);
579 		ret = -EIO;
580 		goto exit;
581 	}
582 
583 	ret = rk_hash_final(priv->hw_ctx, (u8 *)output, BITS2BYTE(nbits));
584 
585 exit:
586 	hw_hash_clean_ctx(priv->hw_ctx);
587 	return ret;
588 }
589 
590 #if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
/*
 * Program the HMAC key into key channel 0, initialize the hash engine
 * for @algo, and enable HMAC mode on top of it.
 *
 * NOTE(review): keys longer than 64 bytes are rejected rather than
 * pre-hashed as RFC 2104 allows — presumably a hardware key-register
 * limit; callers must pre-hash long keys themselves. Verify against
 * the TRM.
 */
int rk_hmac_init(void *hw_ctx, u32 algo, u8 *key, u32 key_len)
{
	u32 reg_ctrl = 0;
	int ret;

	if (!key || !key_len || key_len > 64)
		return -EINVAL;

	clear_key_regs();

	write_key_reg(0, key, key_len);

	ret = rk_hash_init(hw_ctx, algo);
	if (ret)
		return ret;

	/* switch the already-configured hash engine into HMAC mode */
	reg_ctrl = crypto_read(CRYPTO_HASH_CTL) | CRYPTO_HMAC_ENABLE;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL);

	return ret;
}
612 
613 static int rockchip_crypto_hmac_init(struct udevice *dev,
614 				     sha_context *ctx, u8 *key, u32 key_len)
615 {
616 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
617 	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;
618 
619 	if (!ctx)
620 		return -EINVAL;
621 
622 	memset(hash_ctx, 0x00, sizeof(*hash_ctx));
623 
624 	priv->length = 0;
625 
626 	hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
627 						       priv, ctx->length,
628 						       DATA_ADDR_ALIGN_SIZE,
629 						       DATA_LEN_ALIGN_SIZE);
630 	if (!hash_ctx->hash_cache)
631 		return -EFAULT;
632 
633 	return rk_hmac_init(priv->hw_ctx, ctx->algo, key, key_len);
634 }
635 
/* HMAC update is identical to the plain hash update path. */
static int rockchip_crypto_hmac_update(struct udevice *dev,
				       u32 *input, u32 len)
{
	return rockchip_crypto_sha_update(dev, input, len);
}
641 
/* HMAC final is identical to the plain hash final path. */
static int rockchip_crypto_hmac_final(struct udevice *dev,
				      sha_context *ctx, u8 *output)
{
	return rockchip_crypto_sha_final(dev, ctx, output);
}
647 
648 #endif
649 
650 #if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
/* key channel used for all cipher operations (always 0 in this driver) */
static u8 g_key_chn;

/* map driver RK_MODE_* values to CRYPTO_BC_* hardware mode bits */
static const u32 rk_mode2bc_mode[RK_MODE_MAX] = {
	[RK_MODE_ECB] = CRYPTO_BC_ECB,
	[RK_MODE_CBC] = CRYPTO_BC_CBC,
	[RK_MODE_CTS] = CRYPTO_BC_CTS,
	[RK_MODE_CTR] = CRYPTO_BC_CTR,
	[RK_MODE_CFB] = CRYPTO_BC_CFB,
	[RK_MODE_OFB] = CRYPTO_BC_OFB,
	[RK_MODE_XTS] = CRYPTO_BC_XTS,
	[RK_MODE_CCM] = CRYPTO_BC_CCM,
	[RK_MODE_GCM] = CRYPTO_BC_GCM,
	[RK_MODE_CMAC] = CRYPTO_BC_CMAC,
	[RK_MODE_CBC_MAC] = CRYPTO_BC_CBC_MAC,
};
666 
/* Write the 64-bit plaintext/ciphertext length of channel @chn
 * (low word first, 0x08 register stride per channel).
 */
static inline void set_pc_len_reg(u32 chn, u64 pc_len)
{
	u32 chn_base = CRYPTO_CH0_PC_LEN_0 + chn * 0x08;

	crypto_write(pc_len & 0xffffffff, chn_base);
	crypto_write(pc_len >> 32, chn_base + 4);
}
674 
/* Write the 64-bit AAD length of channel @chn (low word first). */
static inline void set_aad_len_reg(u32 chn, u64 pc_len)
{
	u32 chn_base = CRYPTO_CH0_AAD_LEN_0 + chn * 0x08;

	crypto_write(pc_len & 0xffffffff, chn_base);
	crypto_write(pc_len >> 32, chn_base + 4);
}
682 
683 static inline bool is_des_mode(u32 rk_mode)
684 {
685 	return (rk_mode == RK_MODE_ECB ||
686 		rk_mode == RK_MODE_CBC ||
687 		rk_mode == RK_MODE_CFB ||
688 		rk_mode == RK_MODE_OFB);
689 }
690 
691 static void dump_crypto_state(struct crypto_lli_desc *desc,
692 			      u32 tmp, u32 expt_int,
693 			      const u8 *in, const u8 *out,
694 			      u32 len, int ret)
695 {
696 	IMSG("%s\n", ret == -ETIME ? "timeout" : "dismatch");
697 
698 	IMSG("CRYPTO_DMA_INT_ST = %08x, expect_int = %08x\n",
699 	     tmp, expt_int);
700 	IMSG("data desc		= %p\n", desc);
701 	IMSG("\taddr_in		= [%08x <=> %08x]\n",
702 	     desc->src_addr, (u32)virt_to_phys(in));
703 	IMSG("\taddr_out	= [%08x <=> %08x]\n",
704 	     desc->dst_addr, (u32)virt_to_phys(out));
705 	IMSG("\tsrc_len		= [%08x <=> %08x]\n",
706 	     desc->src_len, (u32)len);
707 	IMSG("\tdst_len		= %08x\n", desc->dst_len);
708 	IMSG("\tdma_ctl		= %08x\n", desc->dma_ctrl);
709 	IMSG("\tuser_define	= %08x\n", desc->user_define);
710 
711 	IMSG("\n\nDMA CRYPTO_DMA_LLI_ADDR status = %08x\n",
712 	     crypto_read(CRYPTO_DMA_LLI_ADDR));
713 	IMSG("DMA CRYPTO_DMA_ST status = %08x\n",
714 	     crypto_read(CRYPTO_DMA_ST));
715 	IMSG("DMA CRYPTO_DMA_STATE status = %08x\n",
716 	     crypto_read(CRYPTO_DMA_STATE));
717 	IMSG("DMA CRYPTO_DMA_LLI_RADDR status = %08x\n",
718 	     crypto_read(CRYPTO_DMA_LLI_RADDR));
719 	IMSG("DMA CRYPTO_DMA_SRC_RADDR status = %08x\n",
720 	     crypto_read(CRYPTO_DMA_SRC_RADDR));
721 	IMSG("DMA CRYPTO_DMA_DST_RADDR status = %08x\n",
722 	     crypto_read(CRYPTO_DMA_DST_RADDR));
723 	IMSG("DMA CRYPTO_CIPHER_ST status = %08x\n",
724 	     crypto_read(CRYPTO_CIPHER_ST));
725 	IMSG("DMA CRYPTO_CIPHER_STATE status = %08x\n",
726 	     crypto_read(CRYPTO_CIPHER_STATE));
727 	IMSG("DMA CRYPTO_TAG_VALID status = %08x\n",
728 	     crypto_read(CRYPTO_TAG_VALID));
729 	IMSG("LOCKSTEP status = %08x\n\n",
730 	     crypto_read(0x618));
731 
732 	IMSG("dst %dbyte not transferred\n",
733 	     desc->dst_addr + desc->dst_len -
734 	     crypto_read(CRYPTO_DMA_DST_RADDR));
735 }
736 
/*
 * Build the CCM B0/counter block from @nonce and load it into the IV
 * registers of channel @chn: flags byte (L' = L-1 in the low 3 bits)
 * followed by the nonce, remainder zeroed for the counter.
 *
 * NOTE(review): L is derived as 15 - nlen, so @nlen must be <= 13 for
 * a valid CCM L in [2,8]; an oversized nlen underflows the u32 — the
 * callers are trusted to pass sane nonce lengths.  The subsequent
 * "nonce is too short" test can never trigger with L computed this
 * way; it only guards independently-supplied L values.
 */
static int ccm128_set_iv_reg(u32 chn, const u8 *nonce, u32 nlen)
{
	u8 iv_buf[AES_BLOCK_SIZE];
	u32 L;

	memset(iv_buf, 0x00, sizeof(iv_buf));

	L = 15 - nlen;
	iv_buf[0] = ((u8)(L - 1) & 7);

	/* the L parameter */
	L = iv_buf[0] & 7;

	/* nonce is too short */
	if (nlen < (14 - L))
		return -EINVAL;

	/* clear aad flag */
	iv_buf[0] &= ~0x40;
	memcpy(&iv_buf[1], nonce, 14 - L);

	set_iv_reg(chn, iv_buf, AES_BLOCK_SIZE);

	return 0;
}
762 
763 static void ccm_aad_padding(u32 aad_len, u8 *padding, u32 *padding_size)
764 {
765 	u32 i;
766 
767 	if (aad_len == 0) {
768 		*padding_size = 0;
769 		return;
770 	}
771 
772 	i = aad_len < (0x10000 - 0x100) ? 2 : 6;
773 
774 	if (i == 2) {
775 		padding[0] = (u8)(aad_len >> 8);
776 		padding[1] = (u8)aad_len;
777 	} else {
778 		padding[0] = 0xFF;
779 		padding[1] = 0xFE;
780 		padding[2] = (u8)(aad_len >> 24);
781 		padding[3] = (u8)(aad_len >> 16);
782 		padding[4] = (u8)(aad_len >> 8);
783 	}
784 
785 	*padding_size = i;
786 }
787 
/*
 * Finish the CCM B0 block in @aad_iv: encode the tag size as
 * M' = (M-2)/2 in bits 3..5 of the flags byte, store the 32-bit
 * message length big-endian in bytes 12..15, and set the AAD-present
 * flag (0x40) when there is associated data.  @aad_iv already holds
 * the flags/nonce from ccm128_set_iv_reg().
 */
static int ccm_compose_aad_iv(u8 *aad_iv, u32 data_len, u32 aad_len, u32 tag_size)
{
	aad_iv[0] |= ((u8)(((tag_size - 2) / 2) & 7) << 3);

	aad_iv[12] = (u8)(data_len >> 24);
	aad_iv[13] = (u8)(data_len >> 16);
	aad_iv[14] = (u8)(data_len >> 8);
	aad_iv[15] = (u8)data_len;

	if (aad_len)
		aad_iv[0] |= 0x40;	//set aad flag

	return 0;
}
802 
/*
 * Program the block-cipher engine: select algorithm/key-size/mode
 * bits, load the key (and XTS tweak key), set the IV and leave the
 * engine configured but not yet enabled (CRYPTO_BC_ENABLE is set
 * later by hw_cipher_crypt()).
 *
 * @chn:     key channel; the XTS tweak key goes to channel @chn + 4
 * @enc:     true for encryption, false sets CRYPTO_BC_DECRYPT
 *
 * Returns 0 on success, -EINVAL for an unknown mode/algo/key size.
 */
static int hw_cipher_init(u32 chn, const u8 *key, const u8 *twk_key,
			  u32 key_len, const u8 *iv, u32 iv_len,
			  u32 algo, u32 mode, bool enc)
{
	u32 rk_mode = RK_GET_RK_MODE(mode);
	u32 key_chn_sel = chn;
	u32 reg_ctrl = 0;

	IMSG("%s: key addr is %p, key_len is %d, iv addr is %p",
	     __func__, key, key_len, iv);
	if (rk_mode >= RK_MODE_MAX)
		return -EINVAL;

	switch (algo) {
	case CRYPTO_DES:
		/* key longer than one DES block selects triple-DES */
		if (key_len > DES_BLOCK_SIZE)
			reg_ctrl |= CRYPTO_BC_TDES;
		else
			reg_ctrl |= CRYPTO_BC_DES;
		break;
	case CRYPTO_AES:
		reg_ctrl |= CRYPTO_BC_AES;
		break;
	case CRYPTO_SM4:
		reg_ctrl |= CRYPTO_BC_SM4;
		break;
	default:
		return -EINVAL;
	}

	if (algo == CRYPTO_AES || algo == CRYPTO_SM4) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			reg_ctrl |= CRYPTO_BC_128_bit_key;
			break;
		case AES_KEYSIZE_192:
			reg_ctrl |= CRYPTO_BC_192_bit_key;
			break;
		case AES_KEYSIZE_256:
			reg_ctrl |= CRYPTO_BC_256_bit_key;
			break;
		default:
			return -EINVAL;
		}
	}

	reg_ctrl |= rk_mode2bc_mode[rk_mode];
	if (!enc)
		reg_ctrl |= CRYPTO_BC_DECRYPT;

	/* write key data to reg */
	write_key_reg(key_chn_sel, key, key_len);

	/* write twk key for xts mode */
	if (rk_mode == RK_MODE_XTS)
		write_key_reg(key_chn_sel + 4, twk_key, key_len);

	/* set iv reg; CCM needs its B0/counter block composed first */
	if (rk_mode == RK_MODE_CCM)
		ccm128_set_iv_reg(chn, iv, iv_len);
	else
		set_iv_reg(chn, iv, iv_len);

	/* din_swap set 1, dout_swap set 1, default 1. */
	crypto_write(0x00030003, CRYPTO_FIFO_CTL);
	crypto_write(0, CRYPTO_DMA_INT_EN);

	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL);

	return 0;
}
874 
/*
 * Run one DMA cipher operation on an engine already configured by
 * hw_cipher_init().
 *
 * Builds a data LLI (plus an AAD LLI for CCM/GCM), bounce-buffers any
 * unaligned input/output/AAD, starts the DMA, polls for completion and
 * reads the tag for the tag-producing modes.  CTR input is padded up
 * to a whole AES block in the bounce buffer.
 *
 * NOTE(review): @len is u64 but is narrowed into the u32 tmp_len /
 * descriptor fields, so transfers >= 4 GiB would be silently
 * truncated — presumably never hit in U-Boot; verify if large inputs
 * become possible.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM-like -1 on setup failure,
 * -ETIMEDOUT on poll expiry, -1 on an unexpected interrupt status.
 */
static int hw_cipher_crypt(const u8 *in, u8 *out, u64 len,
			   const u8 *aad, u32 aad_len,
			   u8 *tag, u32 tag_len, u32 mode)
{
	struct crypto_lli_desc *data_desc = NULL, *aad_desc = NULL;
	u8 *dma_in = NULL, *dma_out = NULL, *aad_tmp = NULL;
	u32 rk_mode = RK_GET_RK_MODE(mode);
	u32 reg_ctrl = 0, tmp_len = 0;
	u32 expt_int = 0, mask = 0;
	u32 key_chn = g_key_chn;
	u32 tmp, dst_len = 0;
	int ret = -1;

	/* CTS needs more than one block to steal from */
	if (rk_mode == RK_MODE_CTS && len <= AES_BLOCK_SIZE) {
		printf("CTS mode length %u < 16Byte\n", (u32)len);
		return -EINVAL;
	}

	/* CTR operates on whole blocks; pad the DMA length up */
	tmp_len = (rk_mode == RK_MODE_CTR) ? ROUNDUP(len, AES_BLOCK_SIZE) : len;

	data_desc = align_malloc(sizeof(*data_desc), LLI_ADDR_ALIGN_SIZE);
	if (!data_desc)
		goto exit;

	/* bounce-buffer the input unless it is aligned and unpadded */
	if (IS_ALIGNED((ulong)in, DATA_ADDR_ALIGN_SIZE) && tmp_len == len)
		dma_in = (void *)in;
	else
		dma_in = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE);
	if (!dma_in)
		goto exit;

	if (out) {
		if (IS_ALIGNED((ulong)out, DATA_ADDR_ALIGN_SIZE) &&
		    tmp_len == len)
			dma_out = out;
		else
			dma_out = align_malloc(tmp_len, DATA_ADDR_ALIGN_SIZE);
		if (!dma_out)
			goto exit;
		dst_len = tmp_len;
	}

	memset(data_desc, 0x00, sizeof(*data_desc));
	if (dma_in != in)
		memcpy(dma_in, in, len);

	data_desc->src_addr    = (u32)virt_to_phys(dma_in);
	data_desc->src_len     = tmp_len;
	data_desc->dst_addr    = (u32)virt_to_phys(dma_out);
	data_desc->dst_len     = dst_len;
	data_desc->dma_ctrl    = LLI_DMA_CTRL_LAST;

	/* MAC modes produce no dst stream, so wait for list-done instead */
	if (IS_MAC_MODE(rk_mode)) {
		expt_int = CRYPTO_LIST_DONE_INT_ST;
		data_desc->dma_ctrl |= LLI_DMA_CTRL_LIST_DONE;
	} else {
		expt_int = CRYPTO_DST_ITEM_DONE_INT_ST;
		data_desc->dma_ctrl |= LLI_DMA_CTRL_DST_DONE;
	}

	data_desc->user_define = LLI_USER_CIPHER_START |
				 LLI_USER_STRING_START |
				 LLI_USER_STRING_LAST |
				 (key_chn << 4);
	crypto_write((u32)virt_to_phys(data_desc), CRYPTO_DMA_LLI_ADDR);

	if (rk_mode == RK_MODE_CCM || rk_mode == RK_MODE_GCM) {
		u32 aad_tmp_len = 0;

		aad_desc = align_malloc(sizeof(*aad_desc), LLI_ADDR_ALIGN_SIZE);
		if (!aad_desc)
			goto exit;

		/* AAD descriptor chains into the data descriptor */
		memset(aad_desc, 0x00, sizeof(*aad_desc));
		aad_desc->next_addr = (u32)virt_to_phys(data_desc);
		aad_desc->user_define = LLI_USER_CIPHER_START |
					 LLI_USER_STRING_START |
					 LLI_USER_STRING_LAST |
					 LLI_USER_STRING_AAD |
					 (key_chn << 4);

		if (rk_mode == RK_MODE_CCM) {
			u8 padding[AES_BLOCK_SIZE];
			u32 padding_size = 0;

			memset(padding, 0x00, sizeof(padding));
			ccm_aad_padding(aad_len, padding, &padding_size);

			/* B0 block + length prefix + AAD, block aligned */
			aad_tmp_len = aad_len + AES_BLOCK_SIZE + padding_size;
			aad_tmp_len = ROUNDUP(aad_tmp_len, AES_BLOCK_SIZE);
			aad_tmp = align_malloc(aad_tmp_len,
					       DATA_ADDR_ALIGN_SIZE);
			if (!aad_tmp)
				goto exit;

			/* clear last block */
			memset(aad_tmp + aad_tmp_len - AES_BLOCK_SIZE,
			       0x00, AES_BLOCK_SIZE);

			/* read iv data from reg */
			get_iv_reg(key_chn, aad_tmp, AES_BLOCK_SIZE);
			ccm_compose_aad_iv(aad_tmp, tmp_len, aad_len, tag_len);
			memcpy(aad_tmp + AES_BLOCK_SIZE, padding, padding_size);

			memcpy(aad_tmp + AES_BLOCK_SIZE + padding_size,
			       aad, aad_len);
		} else {
			/* GCM: raw AAD, lengths go into dedicated registers */
			aad_tmp_len = aad_len;
			if (IS_ALIGNED((ulong)aad, DATA_ADDR_ALIGN_SIZE)) {
				aad_tmp = (void *)aad;
			} else {
				aad_tmp = align_malloc(aad_tmp_len,
						       DATA_ADDR_ALIGN_SIZE);
				if (!aad_tmp)
					goto exit;

				memcpy(aad_tmp, aad, aad_tmp_len);
			}

			set_aad_len_reg(key_chn, aad_tmp_len);
			set_pc_len_reg(key_chn, tmp_len);
		}

		aad_desc->src_addr = (u32)virt_to_phys(aad_tmp);
		aad_desc->src_len  = aad_tmp_len;

		if (aad_tmp_len) {
			/* cipher now starts at the AAD descriptor, so the
			 * data descriptor drops LLI_USER_CIPHER_START
			 */
			data_desc->user_define = LLI_USER_STRING_START |
						 LLI_USER_STRING_LAST |
						 (key_chn << 4);
			crypto_write((u32)virt_to_phys(aad_desc), CRYPTO_DMA_LLI_ADDR);
			cache_op_inner(DCACHE_AREA_CLEAN, aad_tmp, aad_tmp_len);
			cache_op_inner(DCACHE_AREA_CLEAN, aad_desc, sizeof(*aad_desc));
		}
	}

	cache_op_inner(DCACHE_AREA_CLEAN, data_desc, sizeof(*data_desc));
	cache_op_inner(DCACHE_AREA_CLEAN, dma_in, tmp_len);
	cache_op_inner(DCACHE_AREA_INVALIDATE, dma_out, tmp_len);

	/* din_swap set 1, dout_swap set 1, default 1. */
	crypto_write(0x00030003, CRYPTO_FIFO_CTL);
	crypto_write(0, CRYPTO_DMA_INT_EN);

	reg_ctrl = crypto_read(CRYPTO_BC_CTL) | CRYPTO_BC_ENABLE;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_BC_CTL);
	crypto_write(0x00010001, CRYPTO_DMA_CTL);//start

	/* ignore the lockstep status bit when checking completion */
	mask = ~(mask | CRYPTO_SYNC_LOCKSTEP_INT_ST);

	/* wait calc ok */
	ret = RK_POLL_TIMEOUT(!(crypto_read(CRYPTO_DMA_INT_ST) & mask),
			      RK_CRYPTO_TIMEOUT);
	tmp = crypto_read(CRYPTO_DMA_INT_ST);
	crypto_write(tmp, CRYPTO_DMA_INT_ST);

	if ((tmp & mask) == expt_int) {
		if (out && out != dma_out)
			memcpy(out, dma_out, len);

		if (IS_NEED_TAG(rk_mode)) {
			ret = WAIT_TAG_VALID(key_chn, RK_CRYPTO_TIMEOUT);
			get_tag_from_reg(key_chn, tag, AES_BLOCK_SIZE);
		}
	} else {
		dump_crypto_state(data_desc, tmp, expt_int, in, out, len, ret);
		ret = -1;
	}

exit:
	crypto_write(0xffff0000, CRYPTO_BC_CTL);//bc_ctl disable
	align_free(data_desc);
	align_free(aad_desc);
	if (dma_in != in)
		align_free(dma_in);
	if (out && dma_out != out)
		align_free(dma_out);
	if (aad && aad != aad_tmp)
		align_free(aad_tmp);

	return ret;
}
1057 
1058 static int hw_aes_init(u32 chn, const u8 *key, const u8 *twk_key, u32 key_len,
1059 		       const u8 *iv, u32 iv_len, u32 mode, bool enc)
1060 {
1061 	u32 rk_mode = RK_GET_RK_MODE(mode);
1062 
1063 	if (rk_mode > RK_MODE_XTS)
1064 		return -EINVAL;
1065 
1066 	if (iv_len > AES_BLOCK_SIZE)
1067 		return -EINVAL;
1068 
1069 	if (IS_NEED_IV(rk_mode)) {
1070 		if (!iv || iv_len != AES_BLOCK_SIZE)
1071 			return -EINVAL;
1072 	} else {
1073 		iv_len = 0;
1074 	}
1075 
1076 	if (rk_mode == RK_MODE_XTS) {
1077 		if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_256)
1078 			return -EINVAL;
1079 
1080 		if (!key || !twk_key)
1081 			return -EINVAL;
1082 	} else {
1083 		if (key_len != AES_KEYSIZE_128 &&
1084 		    key_len != AES_KEYSIZE_192 &&
1085 		    key_len != AES_KEYSIZE_256)
1086 			return -EINVAL;
1087 	}
1088 
1089 	return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len,
1090 			      CRYPTO_AES, mode, enc);
1091 }
1092 
1093 static int hw_sm4_init(u32  chn, const u8 *key, const u8 *twk_key, u32 key_len,
1094 		       const u8 *iv, u32 iv_len, u32 mode, bool enc)
1095 {
1096 	u32 rk_mode = RK_GET_RK_MODE(mode);
1097 
1098 	if (rk_mode > RK_MODE_XTS)
1099 		return -EINVAL;
1100 
1101 	if (iv_len > SM4_BLOCK_SIZE || key_len != SM4_KEYSIZE)
1102 		return -EINVAL;
1103 
1104 	if (IS_NEED_IV(rk_mode)) {
1105 		if (!iv || iv_len != SM4_BLOCK_SIZE)
1106 			return -EINVAL;
1107 	} else {
1108 		iv_len = 0;
1109 	}
1110 
1111 	if (rk_mode == RK_MODE_XTS) {
1112 		if (!key || !twk_key)
1113 			return -EINVAL;
1114 	}
1115 
1116 	return hw_cipher_init(chn, key, twk_key, key_len, iv, iv_len,
1117 			      CRYPTO_SM4, mode, enc);
1118 }
1119 
1120 int rk_crypto_des(struct udevice *dev, u32 mode, const u8 *key, u32 key_len,
1121 		  const u8 *iv, const u8 *in, u8 *out, u32 len, bool enc)
1122 {
1123 	u32 rk_mode = RK_GET_RK_MODE(mode);
1124 	u8 tmp_key[24];
1125 	int ret;
1126 
1127 	if (!is_des_mode(rk_mode))
1128 		return -EINVAL;
1129 
1130 	if (key_len == DES_BLOCK_SIZE || key_len == 3 * DES_BLOCK_SIZE) {
1131 		memcpy(tmp_key, key, key_len);
1132 	} else if (key_len == 2 * DES_BLOCK_SIZE) {
1133 		memcpy(tmp_key, key, 16);
1134 		memcpy(tmp_key + 16, key, 8);
1135 		key_len = 3 * DES_BLOCK_SIZE;
1136 	} else {
1137 		return -EINVAL;
1138 	}
1139 
1140 	ret = hw_cipher_init(0, tmp_key, NULL, key_len, iv, DES_BLOCK_SIZE,
1141 			     CRYPTO_DES, mode, enc);
1142 	if (ret)
1143 		goto exit;
1144 
1145 	ret = hw_cipher_crypt(in, out, len, NULL, 0,
1146 			      NULL, 0, mode);
1147 
1148 exit:
1149 	return ret;
1150 }
1151 
1152 int rk_crypto_aes(struct udevice *dev, u32 mode,
1153 		  const u8 *key, const u8 *twk_key, u32 key_len,
1154 		  const u8 *iv, u32 iv_len,
1155 		  const u8 *in, u8 *out, u32 len, bool enc)
1156 {
1157 	int ret;
1158 
1159 	/* RV1126/RV1109 do not support aes-192 */
1160 #if defined(CONFIG_ROCKCHIP_RV1126)
1161 	if (key_len == AES_KEYSIZE_192)
1162 		return -EINVAL;
1163 #endif
1164 
1165 	ret = hw_aes_init(0, key, twk_key, key_len, iv, iv_len, mode, enc);
1166 	if (ret)
1167 		return ret;
1168 
1169 	return hw_cipher_crypt(in, out, len, NULL, 0,
1170 			       NULL, 0, mode);
1171 }
1172 
1173 int rk_crypto_sm4(struct udevice *dev, u32 mode,
1174 		  const u8 *key, const u8 *twk_key, u32 key_len,
1175 		  const u8 *iv, u32 iv_len,
1176 		  const u8 *in, u8 *out, u32 len, bool enc)
1177 {
1178 	int ret;
1179 
1180 	ret = hw_sm4_init(0, key, twk_key, key_len, iv, iv_len, mode, enc);
1181 	if (ret)
1182 		return ret;
1183 
1184 	return hw_cipher_crypt(in, out, len, NULL, 0, NULL, 0, mode);
1185 }
1186 
1187 int rockchip_crypto_cipher(struct udevice *dev, cipher_context *ctx,
1188 			   const u8 *in, u8 *out, u32 len, bool enc)
1189 {
1190 	switch (ctx->algo) {
1191 	case CRYPTO_DES:
1192 		return rk_crypto_des(dev, ctx->mode, ctx->key, ctx->key_len,
1193 				     ctx->iv, in, out, len, enc);
1194 	case CRYPTO_AES:
1195 		return rk_crypto_aes(dev, ctx->mode,
1196 				     ctx->key, ctx->twk_key, ctx->key_len,
1197 				     ctx->iv, ctx->iv_len, in, out, len, enc);
1198 	case CRYPTO_SM4:
1199 		return rk_crypto_sm4(dev, ctx->mode,
1200 				     ctx->key, ctx->twk_key, ctx->key_len,
1201 				     ctx->iv, ctx->iv_len, in, out, len, enc);
1202 	default:
1203 		return -EINVAL;
1204 	}
1205 }
1206 
1207 int rk_crypto_mac(struct udevice *dev, u32 algo, u32 mode,
1208 		  const u8 *key, u32 key_len,
1209 		  const u8 *in, u32 len, u8 *tag)
1210 {
1211 	u32 rk_mode = RK_GET_RK_MODE(mode);
1212 	int ret;
1213 
1214 	if (!IS_MAC_MODE(rk_mode))
1215 		return -EINVAL;
1216 
1217 	if (algo != CRYPTO_AES && algo != CRYPTO_SM4)
1218 		return -EINVAL;
1219 
1220 	/* RV1126/RV1109 do not support aes-192 */
1221 #if defined(CONFIG_ROCKCHIP_RV1126)
1222 	if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192)
1223 		return -EINVAL;
1224 #endif
1225 
1226 	ret = hw_cipher_init(g_key_chn, key, NULL, key_len, NULL, 0,
1227 			     algo, mode, true);
1228 	if (ret)
1229 		return ret;
1230 
1231 	return hw_cipher_crypt(in, NULL, len, NULL, 0,
1232 			       tag, AES_BLOCK_SIZE, mode);
1233 }
1234 
1235 int rockchip_crypto_mac(struct udevice *dev, cipher_context *ctx,
1236 			const u8 *in, u32 len, u8 *tag)
1237 {
1238 	return rk_crypto_mac(dev, ctx->algo, ctx->mode,
1239 			     ctx->key, ctx->key_len, in, len, tag);
1240 }
1241 
1242 int rk_crypto_ae(struct udevice *dev, u32 algo, u32 mode,
1243 		 const u8 *key, u32 key_len, const u8 *nonce, u32 nonce_len,
1244 		 const u8 *in, u32 len, const u8 *aad, u32 aad_len,
1245 		 u8 *out, u8 *tag)
1246 {
1247 	u32 rk_mode = RK_GET_RK_MODE(mode);
1248 	int ret;
1249 
1250 	if (!IS_AE_MODE(rk_mode))
1251 		return -EINVAL;
1252 
1253 	if (len == 0)
1254 		return -EINVAL;
1255 
1256 	if (algo != CRYPTO_AES && algo != CRYPTO_SM4)
1257 		return -EINVAL;
1258 
1259 	/* RV1126/RV1109 do not support aes-192 */
1260 #if defined(CONFIG_ROCKCHIP_RV1126)
1261 	if (algo == CRYPTO_AES && key_len == AES_KEYSIZE_192)
1262 		return -EINVAL;
1263 #endif
1264 
1265 	ret = hw_cipher_init(g_key_chn, key, NULL, key_len, nonce, nonce_len,
1266 			     algo, mode, true);
1267 	if (ret)
1268 		return ret;
1269 
1270 	return hw_cipher_crypt(in, out, len, aad, aad_len,
1271 			       tag, AES_BLOCK_SIZE, mode);
1272 }
1273 
1274 int rockchip_crypto_ae(struct udevice *dev, cipher_context *ctx,
1275 		       const u8 *in, u32 len, const u8 *aad, u32 aad_len,
1276 		       u8 *out, u8 *tag)
1277 
1278 {
1279 	return rk_crypto_ae(dev, ctx->algo, ctx->mode, ctx->key, ctx->key_len,
1280 			    ctx->iv, ctx->iv_len, in, len,
1281 			    aad, aad_len, out, tag);
1282 }
1283 
1284 #endif
1285 
1286 #if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
1287 static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx,
1288 				      u8 *sign, u8 *output)
1289 {
1290 	struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL;
1291 	struct mpa_num *mpa_c = NULL, *mpa_result = NULL;
1292 	u32 n_bits, n_words;
1293 	int ret;
1294 
1295 	if (!ctx)
1296 		return -EINVAL;
1297 
1298 	if (ctx->algo != CRYPTO_RSA512 &&
1299 	    ctx->algo != CRYPTO_RSA1024 &&
1300 	    ctx->algo != CRYPTO_RSA2048 &&
1301 	    ctx->algo != CRYPTO_RSA3072 &&
1302 	    ctx->algo != CRYPTO_RSA4096)
1303 		return -EINVAL;
1304 
1305 	n_bits = crypto_algo_nbits(ctx->algo);
1306 	n_words = BITS2WORD(n_bits);
1307 
1308 	ret = rk_mpa_alloc(&mpa_m, sign, n_words);
1309 	if (ret)
1310 		goto exit;
1311 
1312 	ret = rk_mpa_alloc(&mpa_e, ctx->e, n_words);
1313 	if (ret)
1314 		goto exit;
1315 
1316 	ret = rk_mpa_alloc(&mpa_n, ctx->n, n_words);
1317 	if (ret)
1318 		goto exit;
1319 
1320 	if (ctx->c) {
1321 		ret = rk_mpa_alloc(&mpa_c, ctx->c, n_words);
1322 		if (ret)
1323 			goto exit;
1324 	}
1325 
1326 	ret = rk_mpa_alloc(&mpa_result, NULL, n_words);
1327 	if (ret)
1328 		goto exit;
1329 
1330 	ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result);
1331 	if (!ret)
1332 		memcpy(output, mpa_result->d, BITS2BYTE(n_bits));
1333 
1334 exit:
1335 	rk_mpa_free(&mpa_m);
1336 	rk_mpa_free(&mpa_e);
1337 	rk_mpa_free(&mpa_n);
1338 	rk_mpa_free(&mpa_c);
1339 	rk_mpa_free(&mpa_result);
1340 
1341 	return ret;
1342 }
1343 #endif
1344 
/*
 * UCLASS_CRYPTO operation table for this driver. Hash/SHA support is
 * always present; RSA, HMAC and cipher/MAC/AE callbacks are compiled in
 * only when their respective Kconfig options are enabled.
 */
static const struct dm_crypto_ops rockchip_crypto_ops = {
	.capability   = rockchip_crypto_capability,
	.sha_init     = rockchip_crypto_sha_init,
	.sha_update   = rockchip_crypto_sha_update,
	.sha_final    = rockchip_crypto_sha_final,
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	.rsa_verify   = rockchip_crypto_rsa_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	.hmac_init    = rockchip_crypto_hmac_init,
	.hmac_update  = rockchip_crypto_hmac_update,
	.hmac_final   = rockchip_crypto_hmac_final,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	.cipher_crypt = rockchip_crypto_cipher,
	.cipher_mac = rockchip_crypto_mac,
	.cipher_ae  = rockchip_crypto_ae,
#endif
};
1364 
1365 /*
1366  * Only use "clocks" to parse crypto clock id and use rockchip_get_clk().
1367  * Because we always add crypto node in U-Boot dts, when kernel dtb enabled :
1368  *
1369  *   1. There is cru phandle mismatch between U-Boot and kernel dtb;
1370  *   2. CONFIG_OF_SPL_REMOVE_PROPS removes clock property;
1371  */
1372 static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev)
1373 {
1374 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
1375 	int len, ret = -EINVAL;
1376 
1377 	memset(priv, 0x00, sizeof(*priv));
1378 
1379 	priv->reg = (fdt_addr_t)dev_read_addr_ptr(dev);
1380 	if (priv->reg == FDT_ADDR_T_NONE)
1381 		return -EINVAL;
1382 
1383 	crypto_base = priv->reg;
1384 
1385 	/* if there is no clocks in dts, just skip it */
1386 	if (!dev_read_prop(dev, "clocks", &len)) {
1387 		printf("Can't find \"clocks\" property\n");
1388 		return 0;
1389 	}
1390 
1391 	memset(priv, 0x00, sizeof(*priv));
1392 	priv->clocks = malloc(len);
1393 	if (!priv->clocks)
1394 		return -ENOMEM;
1395 
1396 	priv->nclocks = len / (2 * sizeof(u32));
1397 	if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks,
1398 			       priv->nclocks)) {
1399 		printf("Can't read \"clocks\" property\n");
1400 		ret = -EINVAL;
1401 		goto exit;
1402 	}
1403 
1404 	if (dev_read_prop(dev, "clock-frequency", &len)) {
1405 		priv->frequencies = malloc(len);
1406 		if (!priv->frequencies) {
1407 			ret = -ENOMEM;
1408 			goto exit;
1409 		}
1410 
1411 		priv->freq_nclocks = len / (2 * sizeof(u32));
1412 		if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies,
1413 				       priv->freq_nclocks)) {
1414 			printf("Can't read \"clock-frequency\" property\n");
1415 			ret = -EINVAL;
1416 			goto exit;
1417 		}
1418 	}
1419 
1420 	return 0;
1421 exit:
1422 	if (priv->clocks)
1423 		free(priv->clocks);
1424 
1425 	if (priv->frequencies)
1426 		free(priv->frequencies);
1427 
1428 	return ret;
1429 }
1430 
1431 static int rk_crypto_set_clk(struct udevice *dev)
1432 {
1433 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
1434 	struct clk clk;
1435 	int i, ret;
1436 
1437 	ret = clk_set_defaults(dev);
1438 	if (!ret)
1439 		return ret;
1440 
1441 	if (priv->freq_nclocks == 0)
1442 		return 0;
1443 
1444 	for (i = 0; i < priv->freq_nclocks; i++) {
1445 		ret = clk_get_by_index(dev, i, &clk);
1446 		if (ret < 0) {
1447 			printf("Failed to get clk index %d, ret=%d\n", i, ret);
1448 			return ret;
1449 		}
1450 		ret = clk_set_rate(&clk, priv->frequencies[i]);
1451 		if (ret < 0) {
1452 			printf("%s: Failed to set clk(%ld): ret=%d\n",
1453 			       __func__, clk.id, ret);
1454 			return ret;
1455 		}
1456 	}
1457 
1458 	return 0;
1459 }
1460 
1461 static int rockchip_crypto_probe(struct udevice *dev)
1462 {
1463 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
1464 	struct rk_crypto_soc_data *sdata;
1465 	int ret = 0;
1466 
1467 	sdata = (struct rk_crypto_soc_data *)dev_get_driver_data(dev);
1468 
1469 	if (sdata->dynamic_cap)
1470 		sdata->capability = sdata->dynamic_cap();
1471 
1472 	priv->soc_data = sdata;
1473 
1474 	priv->hw_ctx = memalign(LLI_ADDR_ALIGN_SIZE,
1475 				sizeof(struct rk_hash_ctx));
1476 	if (!priv->hw_ctx)
1477 		return -ENOMEM;
1478 
1479 	ret = rk_crypto_set_clk(dev);
1480 	if (ret)
1481 		return ret;
1482 
1483 	hw_crypto_reset();
1484 
1485 	return 0;
1486 }
1487 
/*
 * Fixed capability set used by px30 and rk3308 (see rockchip_crypto_ids):
 * MD5/SHA hash and HMAC, RSA-512..4096, DES and AES. No SM3/SM4.
 */
static const struct rk_crypto_soc_data soc_data_base = {
	.capability = CRYPTO_MD5 |
		      CRYPTO_SHA1 |
		      CRYPTO_SHA256 |
		      CRYPTO_SHA512 |
		      CRYPTO_HMAC_MD5 |
		      CRYPTO_HMAC_SHA1 |
		      CRYPTO_HMAC_SHA256 |
		      CRYPTO_HMAC_SHA512 |
		      CRYPTO_RSA512 |
		      CRYPTO_RSA1024 |
		      CRYPTO_RSA2048 |
		      CRYPTO_RSA3072 |
		      CRYPTO_RSA4096 |
		      CRYPTO_DES |
		      CRYPTO_AES,
};
1505 
/*
 * Capability set for SoCs with SM-algorithm support (rv1126, rk3568,
 * rk3588): soc_data_base plus SM3, HMAC-SM3 and SM4.
 */
static const struct rk_crypto_soc_data soc_data_base_sm = {
	.capability = CRYPTO_MD5 |
		      CRYPTO_SHA1 |
		      CRYPTO_SHA256 |
		      CRYPTO_SHA512 |
		      CRYPTO_SM3 |
		      CRYPTO_HMAC_MD5 |
		      CRYPTO_HMAC_SHA1 |
		      CRYPTO_HMAC_SHA256 |
		      CRYPTO_HMAC_SHA512 |
		      CRYPTO_HMAC_SM3 |
		      CRYPTO_RSA512 |
		      CRYPTO_RSA1024 |
		      CRYPTO_RSA2048 |
		      CRYPTO_RSA3072 |
		      CRYPTO_RSA4096 |
		      CRYPTO_DES |
		      CRYPTO_AES |
		      CRYPTO_SM4,
};
1526 
/*
 * rk1808: MD5/SHA1/SHA256 hash and HMAC plus RSA only — no SHA512 and
 * no symmetric ciphers are advertised here.
 */
static const struct rk_crypto_soc_data soc_data_rk1808 = {
	.capability = CRYPTO_MD5 |
		      CRYPTO_SHA1 |
		      CRYPTO_SHA256 |
		      CRYPTO_HMAC_MD5 |
		      CRYPTO_HMAC_SHA1 |
		      CRYPTO_HMAC_SHA256 |
		      CRYPTO_RSA512 |
		      CRYPTO_RSA1024 |
		      CRYPTO_RSA2048 |
		      CRYPTO_RSA3072 |
		      CRYPTO_RSA4096,
};
1540 
/* Crypto v3/v4: capability is discovered at probe time via dynamic_cap(). */
static const struct rk_crypto_soc_data soc_data_cryptov3 = {
	.capability  = 0,	/* overwritten in probe by dynamic_cap() */
	.dynamic_cap = crypto_v3_dynamic_cap,
};
1545 
/* Match table: .data points at the per-SoC capability description. */
static const struct udevice_id rockchip_crypto_ids[] = {
	{
		.compatible = "rockchip,px30-crypto",
		.data = (ulong)&soc_data_base
	},
	{
		.compatible = "rockchip,rk1808-crypto",
		.data = (ulong)&soc_data_rk1808
	},
	{
		.compatible = "rockchip,rk3308-crypto",
		.data = (ulong)&soc_data_base
	},
	{
		.compatible = "rockchip,rv1126-crypto",
		.data = (ulong)&soc_data_base_sm
	},
	{
		.compatible = "rockchip,rk3568-crypto",
		.data = (ulong)&soc_data_base_sm
	},
	{
		.compatible = "rockchip,rk3588-crypto",
		.data = (ulong)&soc_data_base_sm
	},
	{
		.compatible = "rockchip,crypto-v3",
		.data = (ulong)&soc_data_cryptov3
	},
	{
		.compatible = "rockchip,crypto-v4",
		.data = (ulong)&soc_data_cryptov3 /* reuse crypto v3 config */
	},
	{ }
};
1581 
/* Register the crypto v2 block with the U-Boot driver model. */
U_BOOT_DRIVER(rockchip_crypto_v2) = {
	.name		= "rockchip_crypto_v2",
	.id		= UCLASS_CRYPTO,
	.of_match	= rockchip_crypto_ids,
	.ops		= &rockchip_crypto_ops,
	.probe		= rockchip_crypto_probe,
	.ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv),
};
1591