/* drivers/crypto/rockchip/crypto_v2.c (revision 827e2ae92e2103f82dab5b54228ad24e40db6263) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <common.h>
#include <clk.h>
#include <crypto.h>
#include <dm.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <asm/arch/clock.h>
#include <rockchip/crypto_v2.h>
#include <rockchip/crypto_v2_pka.h>

struct rockchip_crypto_priv {
	fdt_addr_t reg;
	struct clk clk;
	u32 frequency;
	char *clocks;
	u32 *frequencies;
	u32 nclocks;
	u32 length;
	struct rk_hash_ctx *hw_ctx;
};

#define LLI_ADDR_ALIGIN_SIZE	8
#define DATA_ADDR_ALIGIN_SIZE	8
#define DATA_LEN_ALIGIN_SIZE	64

#define RK_CRYPTO_TIME_OUT	50000  /* max 50ms */

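/*
 * Busy-wait helper: poll @condition until it clears or @timeout iterations
 * (roughly microseconds, given the udelay(1) per loop) have elapsed. On
 * expiry @ret is set to -ETIME, otherwise it is left at 0.
 */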
#define RK_WHILE_TIME_OUT(condition, timeout, ret) do { \
			u32 time_out = timeout; \
			ret = 0; \
			while (condition) { \
				if (time_out-- == 0) { \
					debug("[%s] %d: time out!\n", __func__,\
						__LINE__); \
					ret = -ETIME; \
					break; \
				} \
				udelay(1); \
			} \
		} while (0)

typedef u32 paddr_t;
#define virt_to_phys(addr)		(((unsigned long)(addr)) & 0xffffffff)
#define phys_to_virt(addr, area)	((unsigned long)(addr))

fdt_addr_t crypto_base;

static void word2byte(u32 word, u8 *ch, u32 endian)
{
	/* endian: BIG_ENDIAN or LITTLE_ENDIAN; any other value zeroes the output */
	if (endian == BIG_ENDIAN) {
		ch[0] = (word >> 24) & 0xff;
		ch[1] = (word >> 16) & 0xff;
		ch[2] = (word >> 8) & 0xff;
		ch[3] = (word >> 0) & 0xff;
	} else if (endian == LITTLE_ENDIAN) {
		ch[0] = (word >> 0) & 0xff;
		ch[1] = (word >> 8) & 0xff;
		ch[2] = (word >> 16) & 0xff;
		ch[3] = (word >> 24) & 0xff;
	} else {
		ch[0] = 0;
		ch[1] = 0;
		ch[2] = 0;
		ch[3] = 0;
	}
}

static void rk_flush_cache_align(ulong addr, ulong size, ulong alignment)
{
	ulong aligned_input, aligned_len;

	/* Must flush dcache before crypto DMA fetches the data region */
	aligned_input = round_down(addr, alignment);
	aligned_len = round_up(size + (addr - aligned_input), alignment);
	flush_cache(aligned_input, aligned_len);
}

static inline void clear_hash_out_reg(void)
{
	int i;

	/* clear the hash output registers */
	for (i = 0; i < 16; i++)
		crypto_write(0, CRYPTO_HASH_DOUT_0 + 4 * i);
}

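/*
 * Soft-reset the PKA and symmetric-crypto blocks and wait for the
 * self-clearing bits in CRYPTO_RST_CTL to drop back to zero.
 */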
static int hw_crypto_reset(void)
{
	u32 tmp = 0, tmp_mask = 0;
	int ret;

	tmp = CRYPTO_SW_PKA_RESET | CRYPTO_SW_CC_RESET;
	tmp_mask = tmp << CRYPTO_WRITE_MASK_SHIFT;

	/* reset pka and crypto modules */
	crypto_write(tmp | tmp_mask, CRYPTO_RST_CTL);

	/* wait for the reset to complete */
	RK_WHILE_TIME_OUT(crypto_read(CRYPTO_RST_CTL),
			  RK_CRYPTO_TIME_OUT, ret);
	return ret;
}

static void hw_hash_clean_ctx(struct rk_hash_ctx *ctx)
{
	/* clear hash status */
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

	if (!ctx)
		return;

	assert(ctx->magic == RK_HASH_CTX_MAGIC);

	if (ctx->cache)
		free(ctx->cache);

	memset(ctx, 0x00, sizeof(*ctx));
}

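/*
 * Prepare the hash engine for a new digest.
 *
 * @hw_ctx: caller-allocated struct rk_hash_ctx
 * @algo:   CRYPTO_MD5 / CRYPTO_SHA1 / CRYPTO_SHA256 / CRYPTO_SHA512
 * @length: total number of bytes that will be fed through rk_hash_update();
 *          the driver uses it to detect the final update and trigger the
 *          hardware padding.
 *
 * Returns 0 on success or a negative error code.
 */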
int rk_hash_init(void *hw_ctx, u32 algo, u32 length)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)hw_ctx;
	u32 reg_ctrl = 0;
	int ret;

	if (!tmp_ctx)
		return -EINVAL;

	memset(tmp_ctx, 0x00, sizeof(*tmp_ctx));

	reg_ctrl = CRYPTO_SW_CC_RESET;
	crypto_write(reg_ctrl | (reg_ctrl << CRYPTO_WRITE_MASK_SHIFT),
		     CRYPTO_RST_CTL);

	/* wait for the reset to complete */
	RK_WHILE_TIME_OUT(crypto_read(CRYPTO_RST_CTL),
			  RK_CRYPTO_TIME_OUT, ret);
	if (ret)
		return ret;

	reg_ctrl = 0;
	tmp_ctx->algo = algo;
	switch (algo) {
	case CRYPTO_MD5:
		reg_ctrl |= CRYPTO_MODE_MD5;
		tmp_ctx->digest_size = 16;
		break;
	case CRYPTO_SHA1:
		reg_ctrl |= CRYPTO_MODE_SHA1;
		tmp_ctx->digest_size = 20;
		break;
	case CRYPTO_SHA256:
		reg_ctrl |= CRYPTO_MODE_SHA256;
		tmp_ctx->digest_size = 32;
		break;
	case CRYPTO_SHA512:
		reg_ctrl |= CRYPTO_MODE_SHA512;
		tmp_ctx->digest_size = 64;
		break;
	default:
		ret = -EINVAL;
		goto exit;
	}

	clear_hash_out_reg();

	/* enable hardware padding */
	reg_ctrl |= CRYPTO_HW_PAD_ENABLE;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_HASH_CTL);

	/*
	 * FIFO input and output data byte swap,
	 * e.g. B0, B1, B2, B3 -> B3, B2, B1, B0
	 */
	reg_ctrl = CRYPTO_DOUT_BYTESWAP | CRYPTO_DOIN_BYTESWAP;
	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_FIFO_CTL);

	/* enable src_item_done interrupt */
	crypto_write(CRYPTO_SRC_ITEM_INT_EN, CRYPTO_DMA_INT_EN);

	tmp_ctx->magic = RK_HASH_CTX_MAGIC;
	tmp_ctx->left_len = length;

	return 0;
exit:
	/* clear hash setting if init failed */
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

	return ret;
}

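/*
 * Feed one chunk of data to the hash engine through a single DMA link-list
 * item (LLI). Non-final chunks pause the DMA and chain back to the same
 * descriptor so the next call can RESTART it; the final chunk is flagged as
 * the end of the list/string. data must be 8-byte aligned and, except for
 * the last chunk, a multiple of 64 bytes long.
 */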
static int rk_hash_direct_calc(struct crypto_lli_desc *lli, const u8 *data,
			       u32 data_len, u8 *started_flag, u8 is_last)
{
	int ret = -EINVAL;
	u32 tmp = 0;

	assert(IS_ALIGNED((ulong)data, DATA_ADDR_ALIGIN_SIZE));
	assert(is_last || IS_ALIGNED(data_len, DATA_LEN_ALIGIN_SIZE));

	debug("%s: data = %p, len = %u, s = %x, l = %x\n",
	      __func__, data, data_len, *started_flag, is_last);

	memset(lli, 0x00, sizeof(*lli));
	lli->src_addr = (u32)virt_to_phys(data);
	lli->src_len = data_len;
	lli->dma_ctrl = LLI_DMA_CTRL_SRC_DONE;

	if (is_last) {
		lli->user_define |= LLI_USER_STRING_LAST;
		lli->dma_ctrl |= LLI_DMA_CTRL_LAST;
	} else {
		lli->next_addr = (u32)virt_to_phys(lli);
		lli->dma_ctrl |= LLI_DMA_CTRL_PAUSE;
	}

	if (!(*started_flag)) {
		lli->user_define |=
			(LLI_USER_STRING_START | LLI_USER_CPIHER_START);
		crypto_write((u32)virt_to_phys(lli), CRYPTO_DMA_LLI_ADDR);
		crypto_write((CRYPTO_HASH_ENABLE << CRYPTO_WRITE_MASK_SHIFT) |
			     CRYPTO_HASH_ENABLE, CRYPTO_HASH_CTL);
		tmp = CRYPTO_DMA_START;
		*started_flag = 1;
	} else {
		tmp = CRYPTO_DMA_RESTART;
	}

	/* flush cache */
	rk_flush_cache_align((ulong)lli, sizeof(*lli),
			     CONFIG_SYS_CACHELINE_SIZE);
	rk_flush_cache_align((ulong)data, data_len, CONFIG_SYS_CACHELINE_SIZE);

	/* start the calculation */
	crypto_write(tmp << CRYPTO_WRITE_MASK_SHIFT | tmp,
		     CRYPTO_DMA_CTL);

	/* wait for the calculation to finish */
	RK_WHILE_TIME_OUT(!crypto_read(CRYPTO_DMA_INT_ST),
			  RK_CRYPTO_TIME_OUT, ret);

	/* clear interrupt status */
	tmp = crypto_read(CRYPTO_DMA_INT_ST);
	crypto_write(tmp, CRYPTO_DMA_INT_ST);

	if (tmp != CRYPTO_SRC_ITEM_DONE_INT_ST &&
	    tmp != CRYPTO_ZERO_LEN_INT_ST) {
		debug("[%s] %d: CRYPTO_DMA_INT_ST = 0x%x\n",
		      __func__, __LINE__, tmp);
		ret = -EFAULT;
		goto exit;
	}

exit:
	return ret;
}

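/*
 * Buffered path: data that is too small or misaligned for direct DMA is
 * copied into an 8-byte aligned bounce buffer of HASH_CACHE_SIZE bytes.
 * The buffer is handed to rk_hash_direct_calc() whenever it fills up, or
 * immediately when this is the final chunk of the message.
 */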
static int rk_hash_cache_calc(struct rk_hash_ctx *tmp_ctx, const u8 *data,
			      u32 data_len, u8 is_last)
{
	u32 left_len;
	int ret = 0;

	if (!tmp_ctx->cache) {
		tmp_ctx->cache = (u8 *)memalign(DATA_ADDR_ALIGIN_SIZE,
						HASH_CACHE_SIZE);
		if (!tmp_ctx->cache) {
			ret = -ENOMEM;
			goto error;
		}

		tmp_ctx->cache_size = 0;
	}

	left_len = tmp_ctx->left_len;

	while (1) {
		u32 tmp_len = 0;

		if (tmp_ctx->cache_size + data_len <= HASH_CACHE_SIZE) {
			/* copy to cache */
			debug("%s, %d: copy to cache %u\n",
			      __func__, __LINE__, data_len);
			memcpy(tmp_ctx->cache + tmp_ctx->cache_size, data,
			       data_len);
			tmp_ctx->cache_size += data_len;

			/* if this is the last chunk, hash the cache immediately */
			if (is_last) {
				debug("%s, %d: last one calc cache %u\n",
				      __func__, __LINE__, tmp_ctx->cache_size);
				ret = rk_hash_direct_calc(&tmp_ctx->data_lli,
							  tmp_ctx->cache,
							  tmp_ctx->cache_size,
							  &tmp_ctx->is_started,
							  is_last);
				if (ret)
					goto error;
			}
			left_len -= data_len;
			break;
		}

		/* fill the cache completely, then hash it */
		tmp_len = HASH_CACHE_SIZE - tmp_ctx->cache_size;
		debug("%s, %d: make cache be full %u\n",
		      __func__, __LINE__, tmp_len);
		memcpy(tmp_ctx->cache + tmp_ctx->cache_size, data, tmp_len);

		ret = rk_hash_direct_calc(&tmp_ctx->data_lli,
					  tmp_ctx->cache,
					  HASH_CACHE_SIZE,
					  &tmp_ctx->is_started,
					  0);
		if (ret)
			goto error;

		data += tmp_len;
		data_len -= tmp_len;
		left_len -= tmp_len;
		tmp_ctx->cache_size = 0;
	}

	return ret;
error:
	return ret ? ret : -EINVAL;
}

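/*
 * Absorb data_len bytes of the message. Aligned data (8-byte address and a
 * 64-byte multiple length, or the final chunk) is DMAed in place; anything
 * else goes through the bounce buffer via rk_hash_cache_calc(). The final
 * chunk is detected by comparing data_len against the bytes still expected
 * (left_len), so the caller must not feed more data than was announced to
 * rk_hash_init(). On any error the hash context is torn down.
 */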
int rk_hash_update(void *ctx, const u8 *data, u32 data_len)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
	const u8 *direct_data = NULL, *cache_data = NULL;
	u32 direct_data_len = 0, cache_data_len = 0;
	int ret = 0;
	u8 is_last = 0;

	debug("\n");
	if (!tmp_ctx || !data)
		goto error;

	if (tmp_ctx->digest_size == 0 || tmp_ctx->magic != RK_HASH_CTX_MAGIC)
		goto error;

	if (tmp_ctx->left_len < data_len)
		goto error;

	is_last = tmp_ctx->left_len == data_len ? 1 : 0;

	if (!tmp_ctx->use_cache &&
	    IS_ALIGNED((ulong)data, DATA_ADDR_ALIGIN_SIZE)) {
		direct_data = data;
		if (IS_ALIGNED(data_len, DATA_LEN_ALIGIN_SIZE) || is_last) {
			/* calc all directly */
			debug("%s, %d: calc all directly\n",
			      __func__, __LINE__);
			direct_data_len = data_len;
		} else {
			/* hash the aligned part directly, buffer the remainder */
			debug("%s, %d: calc some directly calc some in cache\n",
			      __func__, __LINE__);
			direct_data_len = round_down((ulong)data_len,
						     DATA_LEN_ALIGIN_SIZE);
			cache_data = direct_data + direct_data_len;
			cache_data_len = data_len % DATA_LEN_ALIGIN_SIZE;
			tmp_ctx->use_cache = 1;
		}
	} else {
		/* calc all in cache */
		debug("%s, %d: calc all in cache\n", __func__, __LINE__);
		cache_data = data;
		cache_data_len = data_len;
		tmp_ctx->use_cache = 1;
	}

	if (direct_data_len) {
		debug("%s, %d: calc direct data %u\n",
		      __func__, __LINE__, direct_data_len);
		ret = rk_hash_direct_calc(&tmp_ctx->data_lli, direct_data,
					  direct_data_len,
					  &tmp_ctx->is_started, is_last);
		if (ret)
			goto error;
		tmp_ctx->left_len -= direct_data_len;
	}

	if (cache_data_len) {
		debug("%s, %d: calc cache data %u\n",
		      __func__, __LINE__, cache_data_len);
		ret = rk_hash_cache_calc(tmp_ctx, cache_data,
					 cache_data_len, is_last);
		if (ret)
			goto error;
		tmp_ctx->left_len -= cache_data_len;
	}

	return ret;
error:
	/* tear down the hash context on any failure */
	hw_hash_clean_ctx(tmp_ctx);

	return -EINVAL;
}

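/*
 * Wait for the digest to become valid, copy out the first len bytes
 * (big-endian words) and clear the hash engine. The hash context is
 * cleaned up whether or not the read-out succeeds, so it cannot be
 * reused after this call.
 */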
int rk_hash_final(void *ctx, u8 *digest, size_t len)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
	int ret = -EINVAL;
	u32 i;

	if (!digest)
		goto exit;

	if (!tmp_ctx ||
	    tmp_ctx->digest_size == 0 ||
	    len > tmp_ctx->digest_size ||
	    tmp_ctx->magic != RK_HASH_CTX_MAGIC) {
		goto exit;
	}

	/* wait for the hash value to become valid */
	RK_WHILE_TIME_OUT(!crypto_read(CRYPTO_HASH_VALID),
			  RK_CRYPTO_TIME_OUT, ret);

	for (i = 0; i < len / 4; i++)
		word2byte(crypto_read(CRYPTO_HASH_DOUT_0 + i * 4),
			  digest + i * 4, BIG_ENDIAN);

	if (len % 4) {
		u8 tmp_buf[4];

		word2byte(crypto_read(CRYPTO_HASH_DOUT_0 + i * 4),
			  tmp_buf, BIG_ENDIAN);
		memcpy(digest + i * 4, tmp_buf, len % 4);
	}

	/* clear hash status */
	crypto_write(CRYPTO_HASH_IS_VALID, CRYPTO_HASH_VALID);
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

exit:
	/* clean up the hash context (also frees the bounce buffer) */
	hw_hash_clean_ctx(tmp_ctx);

	return ret;
}

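/*
 * Read up to CRYPTO_TRNG_MAX bytes from the ring-oscillator TRNG: program a
 * sample period of 50, request a 256-bit sample, poll until the START bit
 * self-clears, then read the eight 32-bit output words and copy the first
 * len bytes out. The TRNG is switched off again before returning.
 */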
static int rk_trng(u8 *trng, u32 len)
{
	u32 i, reg_ctrl = 0;
	int ret = -EINVAL;
	u32 buf[8];

	if (len > CRYPTO_TRNG_MAX)
		return -EINVAL;

	memset(buf, 0, sizeof(buf));

	/* enable osc_ring to get entropy, sample period is set to 50 */
	crypto_write(50, CRYPTO_RNG_SAMPLE_CNT);

	reg_ctrl |= CRYPTO_RNG_256_bit_len;
	reg_ctrl |= CRYPTO_RNG_SLOWER_SOC_RING_1;
	reg_ctrl |= CRYPTO_RNG_ENABLE;
	reg_ctrl |= CRYPTO_RNG_START;

	crypto_write(reg_ctrl | CRYPTO_WRITE_MASK_ALL, CRYPTO_RNG_CTL);
	RK_WHILE_TIME_OUT(crypto_read(CRYPTO_RNG_CTL) & CRYPTO_RNG_START,
			  RK_CRYPTO_TIME_OUT, ret);

	if (ret == 0) {
		for (i = 0; i < ARRAY_SIZE(buf); i++)
			buf[i] = crypto_read(CRYPTO_RNG_DOUT_0 + i * 4);
		memcpy(trng, buf, len);
	}

	/* close TRNG */
	crypto_write(0 | CRYPTO_WRITE_MASK_ALL, CRYPTO_RNG_CTL);

	return ret;
}

static u32 rockchip_crypto_capability(struct udevice *dev)
{
	return CRYPTO_MD5 |
	       CRYPTO_SHA1 |
	       CRYPTO_SHA256 |
#if !defined(CONFIG_ROCKCHIP_RK1808)
	       CRYPTO_SHA512 |
#endif
	       CRYPTO_RSA512 |
	       CRYPTO_RSA1024 |
	       CRYPTO_RSA2048 |
	       CRYPTO_RSA3072 |
	       CRYPTO_RSA4096 |
	       CRYPTO_TRNG;
}

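/*
 * UCLASS_CRYPTO sha hooks. ctx->length passed to sha_init must be the total
 * number of bytes that the subsequent sha_update() calls will provide;
 * rk_hash_update() relies on it to recognize the last chunk.
 */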
static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

	if (!ctx)
		return -EINVAL;

	memset(priv->hw_ctx, 0x00, sizeof(struct rk_hash_ctx));

	return rk_hash_init(priv->hw_ctx, ctx->algo, ctx->length);
}

static int rockchip_crypto_sha_update(struct udevice *dev,
				      u32 *input, u32 len)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

	if (!len)
		return -EINVAL;

	return rk_hash_update(priv->hw_ctx, (u8 *)input, len);
}

static int rockchip_crypto_sha_final(struct udevice *dev,
				     sha_context *ctx, u8 *output)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 nbits;

	nbits = crypto_algo_nbits(ctx->algo);

	return rk_hash_final(priv->hw_ctx, (u8 *)output, BITS2BYTE(nbits));
}

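/*
 * RSA verification: compute sign^e mod n through the PKA block
 * (rk_exptmod_np) and return the raw n_bits-wide result in output. ctx->c
 * carries the pre-computed helper constant that rk_exptmod_np requires in
 * addition to the modulus; all operands are n_words words long.
 */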
static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx,
				      u8 *sign, u8 *output)
{
	struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL;
	struct mpa_num *mpa_c = NULL, *mpa_result = NULL;
	u32 n_bits, n_words;
	u32 *rsa_result;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_RSA512 &&
	    ctx->algo != CRYPTO_RSA1024 &&
	    ctx->algo != CRYPTO_RSA2048 &&
	    ctx->algo != CRYPTO_RSA3072 &&
	    ctx->algo != CRYPTO_RSA4096)
		return -EINVAL;

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	rsa_result = malloc(BITS2BYTE(n_bits));
	if (!rsa_result)
		return -ENOMEM;

	memset(rsa_result, 0x00, BITS2BYTE(n_bits));

	ret = rk_mpa_alloc(&mpa_m);
	ret |= rk_mpa_alloc(&mpa_e);
	ret |= rk_mpa_alloc(&mpa_n);
	ret |= rk_mpa_alloc(&mpa_c);
	ret |= rk_mpa_alloc(&mpa_result);
	if (ret)
		goto exit;

	mpa_m->d = (void *)sign;
	mpa_e->d = (void *)ctx->e;
	mpa_n->d = (void *)ctx->n;
	mpa_c->d = (void *)ctx->c;
	mpa_result->d = (void *)rsa_result;

	mpa_m->size = n_words;
	mpa_e->size = n_words;
	mpa_n->size = n_words;
	mpa_c->size = n_words;
	mpa_result->size = n_words;

	ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result);
	if (!ret)
		memcpy(output, rsa_result, BITS2BYTE(n_bits));

exit:
	free(rsa_result);
	rk_mpa_free(&mpa_m);
	rk_mpa_free(&mpa_e);
	rk_mpa_free(&mpa_n);
	rk_mpa_free(&mpa_c);
	rk_mpa_free(&mpa_result);

	return ret;
}

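/*
 * Fill output with len random bytes by pulling CRYPTO_TRNG_MAX-byte blocks
 * from rk_trng() and finishing with the remainder.
 */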
static int rockchip_crypto_get_trng(struct udevice *dev, u8 *output, u32 len)
{
	int ret;
	u32 i;

	if (!dev || !output || !len)
		return -EINVAL;

	for (i = 0; i < len / CRYPTO_TRNG_MAX; i++) {
		ret = rk_trng(output + i * CRYPTO_TRNG_MAX, CRYPTO_TRNG_MAX);
		if (ret)
			goto fail;
	}

	ret = rk_trng(output + i * CRYPTO_TRNG_MAX, len % CRYPTO_TRNG_MAX);

fail:
	return ret;
}

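/*
 * Operations exposed through UCLASS_CRYPTO. A typical digest flow from a
 * uclass consumer looks roughly like this (sketch, assuming the crypto_*()
 * wrappers declared in crypto.h):
 *
 *	sha_context ctx = { .algo = CRYPTO_SHA256, .length = total_len };
 *	struct udevice *dev = crypto_get_device(CRYPTO_SHA256);
 *
 *	crypto_sha_init(dev, &ctx);
 *	crypto_sha_update(dev, (u32 *)buf, total_len);
 *	crypto_sha_final(dev, &ctx, digest);
 */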
static const struct dm_crypto_ops rockchip_crypto_ops = {
	.capability = rockchip_crypto_capability,
	.sha_init   = rockchip_crypto_sha_init,
	.sha_update = rockchip_crypto_sha_update,
	.sha_final  = rockchip_crypto_sha_final,
	.rsa_verify = rockchip_crypto_rsa_verify,
	.get_trng = rockchip_crypto_get_trng,
};

/*
 * Only use "clocks" to parse the crypto clock ids and use rockchip_get_clk(),
 * because we always add the crypto node in the U-Boot dts and, when the
 * kernel dtb is enabled:
 *
 *   1. there is a cru phandle mismatch between the U-Boot and kernel dtb;
 *   2. CONFIG_OF_SPL_REMOVE_PROPS removes the clock property.
 */
static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int len, ret = -EINVAL;

	if (!dev_read_prop(dev, "clocks", &len)) {
		printf("Can't find \"clocks\" property\n");
		return -EINVAL;
	}

	memset(priv, 0x00, sizeof(*priv));
	priv->clocks = malloc(len);
	if (!priv->clocks)
		return -ENOMEM;

	priv->nclocks = len / sizeof(u32);
	if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks,
			       priv->nclocks)) {
		printf("Can't read \"clocks\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	if (!dev_read_prop(dev, "clock-frequency", &len)) {
		printf("Can't find \"clock-frequency\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	priv->frequencies = malloc(len);
	if (!priv->frequencies) {
		ret = -ENOMEM;
		goto exit;
	}

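	/*
	 * From here on nclocks holds the number of "clock-frequency" cells,
	 * i.e. the number of clock (phandle, id) pairs, which is what the
	 * probe loop below iterates over.
	 */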
	priv->nclocks = len / sizeof(u32);
	if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies,
			       priv->nclocks)) {
		printf("Can't read \"clock-frequency\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	priv->reg = (fdt_addr_t)dev_read_addr_ptr(dev);

	crypto_base = priv->reg;

	return 0;
exit:
	if (priv->clocks)
		free(priv->clocks);

	if (priv->frequencies)
		free(priv->frequencies);

	return ret;
}

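/*
 * Probe: allocate the hash context, look up the SoC clock driver and set
 * each crypto clock to its requested rate, then reset the crypto block.
 * priv->clocks holds (phandle, id) cell pairs, hence the i * 2 + 1 index.
 */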
static int rockchip_crypto_probe(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int i, ret = 0;
	u32 *clocks;

	priv->hw_ctx = memalign(LLI_ADDR_ALIGIN_SIZE,
				sizeof(struct rk_hash_ctx));
	if (!priv->hw_ctx)
		return -ENOMEM;

	ret = rockchip_get_clk(&priv->clk.dev);
	if (ret) {
		printf("Failed to get clk device, ret=%d\n", ret);
		return ret;
	}

	clocks = (u32 *)priv->clocks;
	for (i = 0; i < priv->nclocks; i++) {
		priv->clk.id = clocks[i * 2 + 1];
		ret = clk_set_rate(&priv->clk, priv->frequencies[i]);
		if (ret < 0) {
			printf("%s: Failed to set clk(%lu): ret=%d\n",
			       __func__, priv->clk.id, ret);
			return ret;
		}
	}

	return hw_crypto_reset();
}

static const struct udevice_id rockchip_crypto_ids[] = {
	{ .compatible = "rockchip,px30-crypto" },
	{ .compatible = "rockchip,rk1808-crypto" },
	{ .compatible = "rockchip,rk3308-crypto" },
	{ .compatible = "rockchip,rv1126-crypto" },
	{ }
};

U_BOOT_DRIVER(rockchip_crypto_v2) = {
	.name		= "rockchip_crypto_v2",
	.id		= UCLASS_CRYPTO,
	.of_match	= rockchip_crypto_ids,
	.ops		= &rockchip_crypto_ops,
	.probe		= rockchip_crypto_probe,
	.ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv),
};