xref: /rk3399_rockchip-uboot/drivers/crypto/rockchip/crypto_ce.c (revision 59992cdc4a619bba3348d50ac90dd462ba6ee8b6)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2025 Rockchip Electronics Co., Ltd
 */

#include <clk.h>
#include <clk-uclass.h>
#include <common.h>
#include <crypto.h>
#include <dm.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <asm/arch/clock.h>
#include <rockchip/crypto_ecc.h>
#include <rockchip/crypto_v2_pka.h>
#include <rockchip/rkce_core.h>

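/*
 * MMIO base apparently consumed by the PKA/ECC helper code; it is only
 * valid while the crypto clocks are on (set in rk_crypto_enable_clk(),
 * cleared in rk_crypto_disable_clk()).
 */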
fdt_addr_t crypto_base;

#define ROUNDUP(size, alignment)	round_up(size, alignment)

#define RKCE_HASH_TIMEOUT_MS	1000
#define RKCE_SYMM_TIMEOUT_MS	1000

struct rkce_sha_contex {
	u32				length;
	struct rkce_hash_td_ctrl	ctrl;
	struct rkce_hash_td		*td;
	struct rkce_hash_td_buf		*td_buf;
};

struct rkce_cipher_contex {
	struct rkce_symm_td		*td;
	struct rkce_symm_td		*td_aad;
	struct rkce_symm_td_buf		*td_buf;
};

struct rockchip_crypto_priv {
	fdt_addr_t			reg;
	u32				frequency;
	char				*clocks;
	u32				*frequencies;
	u32				nclocks;
	u32				freq_nclocks;
	u32				capability;

	void				*hardware;
	struct rkce_sha_contex		*hash_ctx;
	u16				secure;
	u16				enabled;
};

struct rockchip_map {
	u32				crypto;
	u32				rkce;
};

static const struct rockchip_map rk_hash_map[] = {
	{CRYPTO_SM3,         RKCE_HASH_ALGO_SM3},
	{CRYPTO_MD5,         RKCE_HASH_ALGO_MD5},
	{CRYPTO_SHA1,        RKCE_HASH_ALGO_SHA1},
	{CRYPTO_SHA256,      RKCE_HASH_ALGO_SHA256},
	{CRYPTO_SHA512,      RKCE_HASH_ALGO_SHA512},
};

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
static const struct rockchip_map rk_hmac_map[] = {
	{CRYPTO_HMAC_MD5,    RKCE_HASH_ALGO_MD5},
	{CRYPTO_HMAC_SHA1,   RKCE_HASH_ALGO_SHA1},
	{CRYPTO_HMAC_SHA256, RKCE_HASH_ALGO_SHA256},
	{CRYPTO_HMAC_SHA512, RKCE_HASH_ALGO_SHA512},
	{CRYPTO_HMAC_SM3,    RKCE_HASH_ALGO_SM3},
};
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
static const struct rockchip_map rk_cipher_map[] = {
	{CRYPTO_AES,         RKCE_SYMM_ALGO_AES},
	{CRYPTO_DES,         RKCE_SYMM_ALGO_TDES},
	{CRYPTO_SM4,         RKCE_SYMM_ALGO_SM4},
};
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
static const struct rockchip_map rk_rsa_map[] = {
	{CRYPTO_RSA512,       RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA1024,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA2048,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA3072,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA4096,      RKCE_ASYM_ALGO_RSA},
};
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
static const struct rockchip_map rk_ec_map[] = {
	{CRYPTO_SM2,          RKCE_ASYM_ALGO_SM2},
	{CRYPTO_ECC_192R1,    RKCE_ASYM_ALGO_ECC_P192},
	{CRYPTO_ECC_224R1,    RKCE_ASYM_ALGO_ECC_P224},
	{CRYPTO_ECC_256R1,    RKCE_ASYM_ALGO_ECC_P256},
};
#endif

static int rk_crypto_enable_clk(struct udevice *dev);
static int rk_crypto_disable_clk(struct udevice *dev);

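/* Reset the selected engine block; the clocks are gated around the write. */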
static void rk_crypto_soft_reset(struct udevice *dev, uint32_t reset_sel)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

	if (!priv->hardware)
		return;

	rk_crypto_enable_clk(dev);

	rkce_soft_reset(priv->hardware, reset_sel);

	rk_crypto_disable_clk(dev);
}

static void crypto_flush_cacheline(ulong addr, ulong size)
{
	ulong alignment = CONFIG_SYS_CACHELINE_SIZE;
	ulong aligned_input, aligned_len;

	if (!addr || !size)
		return;

	/* Must flush dcache before crypto DMA fetch data region */
	aligned_input = round_down(addr, alignment);
	aligned_len = round_up(size + (addr - aligned_input), alignment);
	flush_cache(aligned_input, aligned_len);
}

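/*
 * Map a CRYPTO_* algorithm flag to the RKCE hardware algorithm code by
 * scanning every map that is compiled in; returns 0 for an unknown algo.
 */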
static u32 rk_get_cemode(u32 algo)
{
	u32 i, j;
	struct {
		const struct rockchip_map	*map;
		u32				num;
	} map_tbl[] = {
		{rk_hash_map, ARRAY_SIZE(rk_hash_map)},
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
		{rk_hmac_map, ARRAY_SIZE(rk_hmac_map)},
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
		{rk_cipher_map, ARRAY_SIZE(rk_cipher_map)},
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
		{rk_rsa_map, ARRAY_SIZE(rk_rsa_map)},
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
		{rk_ec_map, ARRAY_SIZE(rk_ec_map)},
#endif
	};

	for (i = 0; i < ARRAY_SIZE(map_tbl); i++) {
		const struct rockchip_map *map = map_tbl[i].map;
		u32 num = map_tbl[i].num;

		for (j = 0; j < num; j++) {
			if (map[j].crypto == algo)
				return map[j].rkce;
		}
	}

	return 0;
}

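/*
 * Probe the hardware for each entry of @map and accumulate the CRYPTO_*
 * flags of the algorithms this instance actually implements.
 */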
static u32 rk_load_map(struct rockchip_crypto_priv *priv, u32 algo_type,
		       const struct rockchip_map *map, u32 num)
{
	u32 i;
	u32 capability = 0;

	for (i = 0; i < num; i++) {
		if (rkce_hw_algo_valid(priv->hardware, algo_type, map[i].rkce, 0))
			capability |= map[i].crypto;
	}

	return capability;
}

static u32 rockchip_crypto_capability(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 cap = 0;

	if (!priv->enabled)
		return 0;

	if (priv->capability)
		return priv->capability;

	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_HASH,
			   rk_hash_map, ARRAY_SIZE(rk_hash_map));

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_HMAC,
			   rk_hmac_map, ARRAY_SIZE(rk_hmac_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_CIPHER,
			   rk_cipher_map, ARRAY_SIZE(rk_cipher_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_ASYM, rk_rsa_map,
			   ARRAY_SIZE(rk_rsa_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_ASYM, rk_ec_map,
			   ARRAY_SIZE(rk_ec_map));
#endif

	return cap;
}

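/*
 * The task descriptor (td) and its buffer are DMA'd by the engine, so
 * they come from CMA rather than the normal heap.
 */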
static void *rkce_sha_ctx_alloc(void)
{
	struct rkce_sha_contex *hw_ctx;

	hw_ctx = malloc(sizeof(*hw_ctx));
	if (!hw_ctx)
		return NULL;

	memset(hw_ctx, 0x00, sizeof(*hw_ctx));

	hw_ctx->td = rkce_cma_alloc(sizeof(struct rkce_hash_td));
	if (!hw_ctx->td)
		goto error;

	memset(hw_ctx->td, 0x00, sizeof(struct rkce_hash_td));

	hw_ctx->td_buf = rkce_cma_alloc(sizeof(struct rkce_hash_td_buf));
	if (!hw_ctx->td_buf)
		goto error;

	memset(hw_ctx->td_buf, 0x00, sizeof(struct rkce_hash_td_buf));

	return hw_ctx;
error:
	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);

	return NULL;
}

static void rkce_sha_ctx_free(struct rkce_sha_contex *hw_ctx)
{
	if (!hw_ctx)
		return;

	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);
}

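/*
 * Only one hash context can be live at a time; a second init before the
 * final returns -EFAULT.
 */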
static int rk_sha_init(struct udevice *dev, sha_context *ctx,
		       u8 *key, u32 key_len, bool is_hmac)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_sha_contex *hash_ctx = NULL;
	u32 ce_algo = 0;
	int ret = 0;

	if ((ctx->algo & priv->capability) == 0)
		return -ENOSYS;

	if (priv->hash_ctx)
		return -EFAULT;

	rk_crypto_soft_reset(dev, RKCE_RESET_HASH);

	hash_ctx = rkce_sha_ctx_alloc();
	if (!hash_ctx)
		return -ENOMEM;

	ret = rkce_init_hash_td(hash_ctx->td, hash_ctx->td_buf);
	if (ret)
		goto exit;

	ce_algo = rk_get_cemode(ctx->algo);

	hash_ctx->ctrl.td_type        = RKCE_TD_TYPE_HASH;
	hash_ctx->ctrl.hw_pad_en      = 1;
	hash_ctx->ctrl.first_pkg      = 1;
	hash_ctx->ctrl.last_pkg       = 0;
	hash_ctx->ctrl.hash_algo      = ce_algo;
	hash_ctx->ctrl.hmac_en        = is_hmac;
	hash_ctx->ctrl.is_preemptible = 0;
	hash_ctx->ctrl.int_en         = 1;

	if (is_hmac) {
		/* keys longer than 64 bytes are not supported */
		if (key_len > 64) {
			ret = -EINVAL;
			goto exit;
		}

		memcpy(hash_ctx->td_buf->key, key, key_len);
	}

	priv->hash_ctx = hash_ctx;
exit:
	if (ret) {
		rkce_sha_ctx_free(hash_ctx);
		priv->hash_ctx = NULL;
	}

	return ret;
}

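/*
 * Queue one data package to the hash engine; with is_last set, no data
 * is mapped and the descriptor only signals hardware padding/finish.
 */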
static int rk_sha_update(struct udevice *dev, u32 *input, u32 len, bool is_last)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_sha_contex *hash_ctx;
	struct rkce_hash_td *td;
	int ret = 0;

	if (!priv->hash_ctx)
		return -EINVAL;

	if (!is_last && (!input || len == 0))
		return -EINVAL;

	hash_ctx = priv->hash_ctx;
	td = hash_ctx->td;

	td->ctrl = hash_ctx->ctrl;
	memset(td->sg, 0x00, sizeof(td->sg));

	if (hash_ctx->ctrl.first_pkg == 1)
		hash_ctx->ctrl.first_pkg = 0;

	if (is_last) {
		td->ctrl.last_pkg = 1;
	} else {
#ifdef CONFIG_ARM64
		td->sg[0].src_addr_h = rkce_cma_virt2phys(input) >> 32;
#endif
		td->sg[0].src_addr_l = rkce_cma_virt2phys(input) & 0xffffffff;
		td->sg[0].src_size   = len;
		hash_ctx->length += len;
		crypto_flush_cacheline((ulong)input, len);
	}

	rk_crypto_enable_clk(dev);

	crypto_flush_cacheline((ulong)hash_ctx->td, sizeof(*hash_ctx->td));
	crypto_flush_cacheline((ulong)hash_ctx->td_buf, sizeof(*hash_ctx->td_buf));

	ret = rkce_push_td_sync(priv->hardware, td, RKCE_HASH_TIMEOUT_MS);
	if (ret) {
		rkce_sha_ctx_free(hash_ctx);
		priv->hash_ctx = NULL;
	}

	rk_crypto_disable_clk(dev);

	return ret;
}

static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
{
	return rk_sha_init(dev, ctx, NULL, 0, false);
}

static int rockchip_crypto_sha_update(struct udevice *dev, u32 *input, u32 len)
{
	return rk_sha_update(dev, input, len, false);
}

static int rockchip_crypto_sha_final(struct udevice *dev, sha_context *ctx, u8 *output)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_sha_contex *hash_ctx = priv->hash_ctx;
	u32 nbits;
	int ret;

	if (!priv->hash_ctx)
		return -EINVAL;

	nbits = crypto_algo_nbits(ctx->algo);

	if (hash_ctx->length != ctx->length) {
		printf("total length(0x%08x) != init length(0x%08x)!\n",
		       hash_ctx->length, ctx->length);
		ret = -EIO;
		goto exit;
	}

	ret = rk_sha_update(dev, NULL, 0, true);
	if (ret) {
		/* rk_sha_update() has already freed the context on failure */
		return ret;
	}

	memcpy(output, hash_ctx->td_buf->hash, BITS2BYTE(nbits));

exit:
	rkce_sha_ctx_free(hash_ctx);
	priv->hash_ctx = NULL;

	return ret;
}

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
static int rockchip_crypto_hmac_init(struct udevice *dev, sha_context *ctx, u8 *key, u32 key_len)
{
	return rk_sha_init(dev, ctx, key, key_len, true);
}

static int rockchip_crypto_hmac_update(struct udevice *dev, u32 *input, u32 len)
{
	return rockchip_crypto_sha_update(dev, input, len);
}

static int rockchip_crypto_hmac_final(struct udevice *dev, sha_context *ctx, u8 *output)
{
	return rockchip_crypto_sha_final(dev, ctx, output);
}
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)

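/*
 * The CCM helpers below appear to follow the layout of OpenSSL's
 * CRYPTO_ccm128 code; since the length arguments are u32 here, the
 * sizeof()==8 branches for 64-bit lengths are compiled out.
 */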
static int hw_crypto_ccm128_setiv(u8 *iv_buf, u8 *nonce, u32 nlen, u32 mlen)
{
	u32 L = iv_buf[0] & 7;	/* the L parameter */

	if (nlen < (14 - L))
		return -1;	/* nonce is too short */

	if (sizeof(mlen) == 8 && L >= 3) {
		iv_buf[8]  = mlen >> (56 % (sizeof(mlen) * 8));
		iv_buf[9]  = mlen >> (48 % (sizeof(mlen) * 8));
		iv_buf[10] = mlen >> (40 % (sizeof(mlen) * 8));
		iv_buf[11] = mlen >> (32 % (sizeof(mlen) * 8));
	}

	iv_buf[12] = mlen >> 24;
	iv_buf[13] = mlen >> 16;
	iv_buf[14] = mlen >> 8;
	iv_buf[15] = mlen;

	iv_buf[0] &= ~0x40;	/* clear aad flag */
	memcpy(&iv_buf[1], nonce, 14 - L);

	return 0;
}

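/*
 * Encode the AAD length prefix as defined by CCM (RFC 3610): 2 bytes for
 * short AADs, 0xff 0xfe plus 4 bytes below 2^32, 0xff 0xff plus 8 bytes
 * above.
 */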
static void hw_get_ccm_aad_padding(u32 aad_len, u8 *padding, u32 *padding_size)
{
	u32 i = 0;

	if (aad_len == 0) {
		*padding_size = 0;
		return;
	}

	if (aad_len < (0x10000 - 0x100)) {
		i = 2;
	} else if (sizeof(aad_len) == 8 &&
		   aad_len >= (size_t)1 << (32 % (sizeof(aad_len) * 8))) {
		i = 10;
	} else {
		i = 6;
	}

	if (i == 2) {
		padding[0] = aad_len >> 8;
		padding[1] = aad_len;
	} else if (i == 10) {
		padding[0] = 0xFF;
		padding[1] = 0xFF;
		padding[2] = aad_len >> (56 % (sizeof(aad_len) * 8));
		padding[3] = aad_len >> (48 % (sizeof(aad_len) * 8));
		padding[4] = aad_len >> (40 % (sizeof(aad_len) * 8));
		padding[5] = aad_len >> (32 % (sizeof(aad_len) * 8));
		padding[6] = aad_len >> 24;
		padding[7] = aad_len >> 16;
		padding[8] = aad_len >> 8;
		padding[9] = aad_len;
	} else {
		padding[0] = 0xFF;
		padding[1] = 0xFE;
		padding[2] = aad_len >> 24;
		padding[3] = aad_len >> 16;
		padding[4] = aad_len >> 8;
		padding[5] = aad_len;
	}

	*padding_size = i;
}

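/*
 * Turn the prepared nonce into the CCM B0 block: fold in the encoded tag
 * size, write the message length into the trailing bytes and set the
 * Adata flag when AAD is present.
 */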
static int hw_compose_ccm_aad_iv(u8 *aad_iv, u32 data_len,
				 u32 aad_len, u32 tag_size)
{
	u32 L;		/* the L parameter */
	u8 nonce[AES_BLOCK_SIZE];

	L = aad_iv[0] & 7;
	aad_iv[0] |= ((u8)(((tag_size - 2) / 2) & 7) << 3);

	if (sizeof(data_len) == 8 && L >= 3) {
		aad_iv[8]  = data_len >> (56 % (sizeof(data_len) * 8));
		aad_iv[9]  = data_len >> (48 % (sizeof(data_len) * 8));
		aad_iv[10] = data_len >> (40 % (sizeof(data_len) * 8));
		aad_iv[11] = data_len >> (32 % (sizeof(data_len) * 8));
	}

	/* save nonce */
	memcpy(nonce, &aad_iv[1], 14 - L);

	aad_iv[12] = data_len >> 24;
	aad_iv[13] = data_len >> 16;
	aad_iv[14] = data_len >> 8;
	aad_iv[15] = data_len;

	/* restore nonce */
	memcpy(&aad_iv[1], nonce, 14 - L);

	aad_iv[0] &= ~0x40;	/* clear Adata flag */

	if (aad_len)
		aad_iv[0] |= 0x40;	/* set Adata flag */

	return 0;
}

static void rkce_destroy_ccm_aad(u8 *new_aad)
{
	rkce_cma_free(new_aad);
}

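/*
 * Build the AAD buffer the engine expects for CCM: B0, then the encoded
 * AAD length, then the AAD itself, zero-padded to a whole AES block.
 * The caller must release it with rkce_destroy_ccm_aad().
 */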
static int rkce_build_ccm_aad(const u8 *aad, u32 aad_len, u32 data_len,
			      u8 *iv, u32 iv_len,
			      u8 **new_aad, u32 *new_aad_len,
			      u8 *new_iv, u32 *new_iv_len)
{
	int ret = -RKCE_INVAL;
	u32 L;
	u8 nonce[AES_BLOCK_SIZE];
	u8 pad[AES_BLOCK_SIZE];
	u32 pad_size = 0;
	u32 tag_len = AES_BLOCK_SIZE;
	u8 *aad_tmp = NULL;
	u32 aad_tmp_len = 0;

	memset(nonce, 0x00, sizeof(nonce));

	L = 15 - iv_len;
	nonce[0] = (L - 1) & 7;
	ret = hw_crypto_ccm128_setiv(nonce, (u8 *)iv, iv_len, 0);
	if (ret)
		return ret;

	memcpy(new_iv, nonce, sizeof(nonce));
	*new_iv_len = sizeof(nonce);

	memset(pad, 0x00, sizeof(pad));
	hw_get_ccm_aad_padding(aad_len, pad, &pad_size);

	aad_tmp_len = aad_len + AES_BLOCK_SIZE + pad_size;
	aad_tmp_len = ROUNDUP(aad_tmp_len, AES_BLOCK_SIZE);

	aad_tmp = rkce_cma_alloc(aad_tmp_len);
	if (!aad_tmp) {
		ret = -RKCE_NOMEM;
		goto exit;
	}

	/* clear last block */
	memset(aad_tmp + aad_tmp_len - AES_BLOCK_SIZE, 0x00, AES_BLOCK_SIZE);
	memcpy(aad_tmp, nonce, sizeof(nonce));
	hw_compose_ccm_aad_iv(aad_tmp, data_len, aad_len, tag_len);
	memcpy(aad_tmp + AES_BLOCK_SIZE, pad, pad_size);

	memcpy(aad_tmp + AES_BLOCK_SIZE + pad_size, aad, aad_len);

	*new_aad     = aad_tmp;
	*new_aad_len = aad_tmp_len;

exit:
	return ret;
}

static void *rkce_cipher_ctx_alloc(void)
{
	struct rkce_cipher_contex *hw_ctx;

	hw_ctx = malloc(sizeof(*hw_ctx));
	if (!hw_ctx)
		return NULL;

	/* zero first so the error path never frees stale pointers */
	memset(hw_ctx, 0x00, sizeof(*hw_ctx));

	hw_ctx->td = rkce_cma_alloc(sizeof(struct rkce_symm_td));
	if (!hw_ctx->td)
		goto error;

	memset(hw_ctx->td, 0x00, sizeof(struct rkce_symm_td));

	hw_ctx->td_aad = rkce_cma_alloc(sizeof(struct rkce_symm_td));
	if (!hw_ctx->td_aad)
		goto error;

	memset(hw_ctx->td_aad, 0x00, sizeof(struct rkce_symm_td));

	hw_ctx->td_buf = rkce_cma_alloc(sizeof(struct rkce_symm_td_buf));
	if (!hw_ctx->td_buf)
		goto error;

	memset(hw_ctx->td_buf, 0x00, sizeof(struct rkce_symm_td_buf));

	return hw_ctx;
error:
	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_aad);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);

	return NULL;
}

static void rkce_cipher_ctx_free(struct rkce_cipher_contex *hw_ctx)
{
	if (!hw_ctx)
		return;

	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_aad);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);
}

static void crypto_invalidate_cacheline(ulong addr, ulong size)
{
	ulong alignment = CONFIG_SYS_CACHELINE_SIZE;
	ulong aligned_input, aligned_len;

	if (!addr || !size)
		return;

	/* Must invalidate dcache after crypto DMA write data region */
	aligned_input = round_down(addr, alignment);
	aligned_len = round_up(size + (addr - aligned_input), alignment);
	invalidate_dcache_range(aligned_input, aligned_input + aligned_len);
}

static const struct rockchip_map rk_cipher_algo_map[] = {
	{RK_MODE_ECB,     RKCE_SYMM_MODE_ECB},
	{RK_MODE_CBC,     RKCE_SYMM_MODE_CBC},
	{RK_MODE_CTS,     RKCE_SYMM_MODE_CTS},
	{RK_MODE_CTR,     RKCE_SYMM_MODE_CTR},
	{RK_MODE_CFB,     RKCE_SYMM_MODE_CFB},
	{RK_MODE_OFB,     RKCE_SYMM_MODE_OFB},
	{RK_MODE_XTS,     RKCE_SYMM_MODE_XTS},
	{RK_MODE_CCM,     RKCE_SYMM_MODE_CCM},
	{RK_MODE_GCM,     RKCE_SYMM_MODE_GCM},
	{RK_MODE_CMAC,    RKCE_SYMM_MODE_CMAC},
	{RK_MODE_CBC_MAC, RKCE_SYMM_MODE_CBC_MAC},
};

static int rk_get_cipher_cemode(u32 algo, u32 mode, u32 *ce_algo, u32 *ce_mode)
{
	u32 i;

	switch (algo) {
	case CRYPTO_DES:
		*ce_algo = RKCE_SYMM_ALGO_TDES;
		break;
	case CRYPTO_AES:
		*ce_algo = RKCE_SYMM_ALGO_AES;
		break;
	case CRYPTO_SM4:
		*ce_algo = RKCE_SYMM_ALGO_SM4;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algo_map); i++) {
		if (mode == rk_cipher_algo_map[i].crypto) {
			*ce_mode = rk_cipher_algo_map[i].rkce;
			return 0;
		}
	}

	return -EINVAL;
}

u32 rk_get_td_keysize(u32 ce_algo, u32 ce_mode, u32 key_len)
{
	u32 key_size = 0;

	if (ce_algo == RKCE_SYMM_ALGO_AES) {
		if (key_len == AES_KEYSIZE_128)
			key_size = RKCE_KEY_AES_128;
		else if (key_len == AES_KEYSIZE_192)
			key_size = RKCE_KEY_AES_192;
		else if (key_len == AES_KEYSIZE_256)
			key_size = RKCE_KEY_AES_256;
	}

	return key_size;
}

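/*
 * Load key material into the td buffer: key1 (plus key2 for XTS), with
 * the usual two-key to three-key expansion (K1K2 -> K1K2K1) for TDES.
 */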
int rk_set_symm_td_buf_key(struct rkce_symm_td_buf *td_buf,
			   u32 ce_algo, u32 ce_mode, cipher_context *ctx)
{
	memset(td_buf->key1, 0x00, sizeof(td_buf->key1));
	memset(td_buf->key2, 0x00, sizeof(td_buf->key2));

	if (ce_mode == RKCE_SYMM_MODE_XTS) {
		memcpy(td_buf->key1, ctx->key, ctx->key_len);
		memcpy(td_buf->key2, ctx->twk_key, ctx->key_len);
	} else {
		memcpy(td_buf->key1, ctx->key, ctx->key_len);
	}

	if (ctx->key_len == DES_KEYSIZE * 2 &&
	    (ce_algo == RKCE_SYMM_ALGO_DES || ce_algo == RKCE_SYMM_ALGO_TDES))
		memcpy(td_buf->key1 + DES_KEYSIZE * 2, td_buf->key1, DES_KEYSIZE);

	return 0;
}

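/*
 * Fill the single scatter-gather slot of a symmetric td; physical
 * addresses are split into low/high halves for the 64-bit case.
 */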
int rk_set_symm_td_sg(struct rkce_symm_td *td,
		      const u8 *in, u32 in_len, u8 *out, u32 out_len)
{
	memset(td->sg, 0x00, sizeof(td->sg));

#ifdef CONFIG_ARM64
	td->sg[0].src_addr_h = rkce_cma_virt2phys(in) >> 32;
#endif
	td->sg[0].src_addr_l = rkce_cma_virt2phys(in) & 0xffffffff;
	td->sg[0].src_size   = in_len;

	if (out && out_len) {
#ifdef CONFIG_ARM64
		td->sg[0].dst_addr_h = rkce_cma_virt2phys(out) >> 32;
#endif
		td->sg[0].dst_addr_l = rkce_cma_virt2phys(out) & 0xffffffff;
		td->sg[0].dst_size   = out_len;
	}

	td->next_task = 0;

	return 0;
}

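/*
 * Single-shot symmetric operation: reset the engine, build one td with
 * first_pkg and last_pkg both set, run an extra AAD descriptor first
 * for CCM/GCM, then push the payload descriptor and collect the tag.
 * A NULL key with a non-zero key_len selects the hardware key table.
 */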
static int rk_crypto_cipher(struct udevice *dev, cipher_context *ctx,
			    const u8 *in, u8 *out, u32 len, bool enc,
			    const u8 *aad, u32 aad_len, u8 *tag)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_cipher_contex *hw_ctx = NULL;
	u32 ce_algo = 0, ce_mode = 0;
	bool use_otpkey = false;
	int ret = 0;

	rk_crypto_soft_reset(dev, RKCE_RESET_SYMM);

	if (!ctx->key && ctx->key_len)
		use_otpkey = true;

	ret = rk_get_cipher_cemode(ctx->algo, ctx->mode, &ce_algo, &ce_mode);
	if (ret)
		return ret;

	hw_ctx = rkce_cipher_ctx_alloc();
	if (!hw_ctx)
		return -ENOMEM;

	rkce_init_symm_td(hw_ctx->td, hw_ctx->td_buf);

	hw_ctx->td->ctrl.td_type   = RKCE_TD_TYPE_SYMM;
	hw_ctx->td->ctrl.is_dec    = !enc;
	hw_ctx->td->ctrl.symm_algo = ce_algo;
	hw_ctx->td->ctrl.symm_mode = ce_mode;
	hw_ctx->td->ctrl.key_size  = rk_get_td_keysize(ce_algo, ce_mode, ctx->key_len);
	hw_ctx->td->ctrl.first_pkg = 1;
	hw_ctx->td->ctrl.last_pkg  = 1;
	hw_ctx->td->ctrl.int_en    = 1;
	hw_ctx->td->ctrl.key_sel   = use_otpkey ? RKCE_KEY_SEL_KT : RKCE_KEY_SEL_USER;

	memcpy(hw_ctx->td_buf->iv, ctx->iv, ctx->iv_len);
	hw_ctx->td->ctrl.iv_len    = ctx->iv_len;

	if (!use_otpkey) {
		ret = rk_set_symm_td_buf_key(hw_ctx->td_buf, ce_algo, ce_mode, ctx);
		if (ret)
			goto exit;
	}

	ret = rk_set_symm_td_sg(hw_ctx->td, in, len, out, len);
	if (ret)
		goto exit;

	if (ce_mode == RKCE_SYMM_MODE_CCM) {
		u8 *new_aad = NULL;
		u32 new_aad_len = 0, new_iv_len = 0;

		rkce_init_symm_td(hw_ctx->td_aad, hw_ctx->td_buf);

		ret = rkce_build_ccm_aad(aad, aad_len, len,
					 hw_ctx->td_buf->iv, ctx->iv_len,
					 &new_aad, &new_aad_len,
					 hw_ctx->td_buf->iv, &new_iv_len);
		if (ret)
			goto exit;

		ret = rk_set_symm_td_sg(hw_ctx->td_aad, new_aad, new_aad_len, NULL, 0);
		if (ret)
			goto exit;

		hw_ctx->td->ctrl.iv_len = new_iv_len;

		hw_ctx->td_buf->gcm_len.aad_len_l = new_aad_len;

		hw_ctx->td_aad->ctrl = hw_ctx->td->ctrl;
		hw_ctx->td_aad->ctrl.is_aad = 1;

		crypto_flush_cacheline((ulong)hw_ctx->td_aad, sizeof(*hw_ctx->td_aad));
		crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
		crypto_flush_cacheline((ulong)new_aad, new_aad_len);

		rk_crypto_enable_clk(dev);

		ret = rkce_push_td_sync(priv->hardware, hw_ctx->td_aad, RKCE_SYMM_TIMEOUT_MS);

		rk_crypto_disable_clk(dev);

		rkce_destroy_ccm_aad(new_aad);

		if (ret) {
			printf("CCM calc aad data failed.\n");
			goto exit;
		}
	} else if (ce_mode == RKCE_SYMM_MODE_GCM) {
		rkce_init_symm_td(hw_ctx->td_aad, hw_ctx->td_buf);

		ret = rk_set_symm_td_sg(hw_ctx->td_aad, aad, aad_len, NULL, 0);
		if (ret)
			goto exit;

		hw_ctx->td_buf->gcm_len.aad_len_l = aad_len;
		hw_ctx->td_buf->gcm_len.pc_len_l = len;

		hw_ctx->td_aad->ctrl = hw_ctx->td->ctrl;
		hw_ctx->td_aad->ctrl.is_aad = 1;

		crypto_flush_cacheline((ulong)hw_ctx->td_aad, sizeof(*hw_ctx->td_aad));
		crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
		crypto_flush_cacheline((ulong)aad, aad_len);

		rk_crypto_enable_clk(dev);

		ret = rkce_push_td_sync(priv->hardware, hw_ctx->td_aad, RKCE_SYMM_TIMEOUT_MS);

		rk_crypto_disable_clk(dev);
		if (ret) {
			printf("GCM calc aad data failed.\n");
			goto exit;
		}
	}

	crypto_flush_cacheline((ulong)hw_ctx->td, sizeof(*hw_ctx->td));
	crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
	crypto_flush_cacheline((ulong)in, len);
	crypto_invalidate_cacheline((ulong)out, len);

	rk_crypto_enable_clk(dev);

	ret = rkce_push_td_sync(priv->hardware, hw_ctx->td, RKCE_SYMM_TIMEOUT_MS);

	crypto_invalidate_cacheline((ulong)out, len);

	rk_crypto_disable_clk(dev);

	if (tag)
		memcpy(tag, hw_ctx->td_buf->tag, sizeof(hw_ctx->td_buf->tag));
exit:
	rkce_cipher_ctx_free(hw_ctx);

	return ret;
}

static int rockchip_crypto_cipher(struct udevice *dev, cipher_context *ctx,
				  const u8 *in, u8 *out, u32 len, bool enc)
{
	return rk_crypto_cipher(dev, ctx, in, out, len, enc, NULL, 0, NULL);
}

static int rockchip_crypto_mac(struct udevice *dev, cipher_context *ctx,
			       const u8 *in, u32 len, u8 *tag)
{
	return rk_crypto_cipher(dev, ctx, in, NULL, len, true, NULL, 0, tag);
}

static int rockchip_crypto_ae(struct udevice *dev, cipher_context *ctx,
			      const u8 *in, u32 len, const u8 *aad, u32 aad_len,
			      u8 *out, u8 *tag)
{
	return rk_crypto_cipher(dev, ctx, in, out, len, true, aad, aad_len, tag);
}

#if CONFIG_IS_ENABLED(DM_KEYLAD)
int rockchip_crypto_fw_cipher(struct udevice *dev, cipher_fw_context *ctx,
			      const u8 *in, u8 *out, u32 len, bool enc)
{
	cipher_context cipher_ctx;

	memset(&cipher_ctx, 0x00, sizeof(cipher_ctx));

	cipher_ctx.algo    = ctx->algo;
	cipher_ctx.mode    = ctx->mode;
	cipher_ctx.key_len = ctx->key_len;
	cipher_ctx.iv      = ctx->iv;
	cipher_ctx.iv_len  = ctx->iv_len;

	return rk_crypto_cipher(dev, &cipher_ctx, in, out, len, enc, NULL, 0, NULL);
}

static ulong rockchip_get_keytable_addr(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	ulong addr;

	rk_crypto_enable_clk(dev);

	addr = rkce_get_keytable_addr(priv->hardware);

	rk_crypto_disable_clk(dev);

	return addr;
}
#endif

#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
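/*
 * RSA verify via the PKA: computes sign^e mod n (using the optional
 * Montgomery constant c when the key provides it) and hands the raw
 * result block back for the caller to check.
 */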
static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx,
				      u8 *sign, u8 *output)
{
	struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL;
	struct mpa_num *mpa_c = NULL, *mpa_result = NULL;
	u32 n_bits, n_words;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_RSA512 &&
	    ctx->algo != CRYPTO_RSA1024 &&
	    ctx->algo != CRYPTO_RSA2048 &&
	    ctx->algo != CRYPTO_RSA3072 &&
	    ctx->algo != CRYPTO_RSA4096)
		return -EINVAL;

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	ret = rk_mpa_alloc(&mpa_m, sign, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_e, ctx->e, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_n, ctx->n, n_words);
	if (ret)
		goto exit;

	if (ctx->c) {
		ret = rk_mpa_alloc(&mpa_c, ctx->c, n_words);
		if (ret)
			goto exit;
	}

	ret = rk_mpa_alloc(&mpa_result, NULL, n_words);
	if (ret)
		goto exit;

	rk_crypto_enable_clk(dev);
	ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result);
	if (!ret)
		memcpy(output, mpa_result->d, BITS2BYTE(n_bits));
	rk_crypto_disable_clk(dev);

exit:
	rk_mpa_free(&mpa_m);
	rk_mpa_free(&mpa_e);
	rk_mpa_free(&mpa_n);
	rk_mpa_free(&mpa_c);
	rk_mpa_free(&mpa_result);

	return ret;
}
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
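/*
 * ECDSA/SM2 verify: the public key (x, y) and the signature (r, s) are
 * loaded as big numbers and handed to the PKA-backed rockchip_ecc_verify().
 */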
static int rockchip_crypto_ec_verify(struct udevice *dev, ec_key *ctx,
				     u8 *hash, u32 hash_len, u8 *sign)
{
	struct rk_ecp_point point_P, point_sign;
	u32 n_bits, n_words;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_SM2 &&
	    ctx->algo != CRYPTO_ECC_192R1 &&
	    ctx->algo != CRYPTO_ECC_224R1 &&
	    ctx->algo != CRYPTO_ECC_256R1)
		return -EINVAL;

	/* zero the points so rk_mpa_free() is safe on the error path */
	memset(&point_P, 0x00, sizeof(point_P));
	memset(&point_sign, 0x00, sizeof(point_sign));

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	ret = rk_mpa_alloc(&point_P.x, ctx->x, n_words);
	ret |= rk_mpa_alloc(&point_P.y, ctx->y, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&point_sign.x, sign, n_words);
	ret |= rk_mpa_alloc(&point_sign.y, sign + WORD2BYTE(n_words), n_words);
	if (ret)
		goto exit;

	rk_crypto_enable_clk(dev);
	ret = rockchip_ecc_verify(ctx->algo, hash, hash_len, &point_P, &point_sign);
	rk_crypto_disable_clk(dev);
exit:
	rk_mpa_free(&point_P.x);
	rk_mpa_free(&point_P.y);
	rk_mpa_free(&point_sign.x);
	rk_mpa_free(&point_sign.y);

	return ret;
}
#endif

static bool rockchip_crypto_is_secure(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

	return priv->secure;
}

static const struct dm_crypto_ops rockchip_crypto_ops = {
	.capability   = rockchip_crypto_capability,
	.sha_init     = rockchip_crypto_sha_init,
	.sha_update   = rockchip_crypto_sha_update,
	.sha_final    = rockchip_crypto_sha_final,
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	.hmac_init    = rockchip_crypto_hmac_init,
	.hmac_update  = rockchip_crypto_hmac_update,
	.hmac_final   = rockchip_crypto_hmac_final,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	.rsa_verify   = rockchip_crypto_rsa_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
	.ec_verify    = rockchip_crypto_ec_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	.cipher_crypt    = rockchip_crypto_cipher,
	.cipher_mac      = rockchip_crypto_mac,
	.cipher_ae       = rockchip_crypto_ae,

#if CONFIG_IS_ENABLED(DM_KEYLAD)
	.cipher_fw_crypt = rockchip_crypto_fw_cipher,
	.keytable_addr   = rockchip_get_keytable_addr,
#endif

#endif

	.is_secure       = rockchip_crypto_is_secure,
};

/*
 * Only the "clocks" property is parsed for the crypto clock ids, together
 * with rockchip_get_clk(), because U-Boot always carries its own crypto
 * node. When the kernel dtb is used instead:
 *
 *   1. the cru phandle differs between the U-Boot and kernel dtb;
 *   2. CONFIG_OF_SPL_REMOVE_PROPS strips the clock properties.
 */
static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int len, ret = -EINVAL;

	memset(priv, 0x00, sizeof(*priv));

	priv->reg = dev_read_addr(dev);
	if (priv->reg == FDT_ADDR_T_NONE)
		return -EINVAL;

	crypto_base = priv->reg;

	priv->secure = dev_read_bool(dev, "secure");
	priv->enabled = true;

#if !defined(CONFIG_SPL_BUILD)
	/* U-Boot proper does not drive the secure crypto engine */
	priv->enabled = !priv->secure;
#endif
	if (!priv->enabled)
		return 0;

	/* if there is no "clocks" property in the dts, just skip it */
	if (!dev_read_prop(dev, "clocks", &len)) {
		printf("Can't find \"clocks\" property\n");
		return 0;
	}

	priv->clocks = malloc(len);
	if (!priv->clocks)
		return -ENOMEM;

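	/*
	 * Each "clocks" entry is a <phandle, id> cell pair; only the entry
	 * count is consumed later, clk_get_by_index() does the real lookup.
	 */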
	priv->nclocks = len / (2 * sizeof(u32));
	if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks,
			       priv->nclocks)) {
		printf("Can't read \"clocks\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	if (dev_read_prop(dev, "clock-frequency", &len)) {
		priv->frequencies = malloc(len);
		if (!priv->frequencies) {
			ret = -ENOMEM;
			goto exit;
		}
		priv->freq_nclocks = len / sizeof(u32);
		if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies,
				       priv->freq_nclocks)) {
			printf("Can't read \"clock-frequency\" property\n");
			ret = -EINVAL;
			goto exit;
		}
	}

	return 0;
exit:
	if (priv->clocks)
		free(priv->clocks);

	if (priv->frequencies)
		free(priv->frequencies);

	return ret;
}

static int rk_crypto_do_enable_clk(struct udevice *dev, int enable)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct clk clk;
	int i, ret;

	for (i = 0; i < priv->nclocks; i++) {
		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0) {
			printf("Failed to get clk index %d, ret=%d\n", i, ret);
			return ret;
		}

		if (enable)
			ret = clk_enable(&clk);
		else
			ret = clk_disable(&clk);
		if (ret < 0 && ret != -ENOSYS) {
			debug("Failed to enable(%d) clk(%ld): ret=%d\n",
			      enable, clk.id, ret);
			return ret;
		}
	}

	return 0;
}

static int rk_crypto_enable_clk(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

	crypto_base = priv->reg;

	return rk_crypto_do_enable_clk(dev, 1);
}

static int rk_crypto_disable_clk(struct udevice *dev)
{
	crypto_base = 0;

	return rk_crypto_do_enable_clk(dev, 0);
}

static int rk_crypto_set_clk(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct clk clk;
	int i, ret;

	/* use standard "assigned-clock-rates" props */
	if (dev_read_size(dev, "assigned-clock-rates") > 0)
		return clk_set_defaults(dev);

	/* use "clock-frequency" props */
	if (priv->freq_nclocks == 0)
		return 0;

	for (i = 0; i < priv->freq_nclocks; i++) {
		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0) {
			printf("Failed to get clk index %d, ret=%d\n", i, ret);
			return ret;
		}
		ret = clk_set_rate(&clk, priv->frequencies[i]);
		if (ret < 0) {
			printf("%s: Failed to set clk(%ld): ret=%d\n",
			       __func__, clk.id, ret);
			return ret;
		}
	}

	return 0;
}

static int rockchip_crypto_probe(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int ret = 0;

	if (!priv->enabled)
		return 0;

	ret = rk_crypto_set_clk(dev);
	if (ret)
		return ret;

	rk_crypto_enable_clk(dev);

	priv->hardware = rkce_hardware_alloc((void *)priv->reg);
	if (!priv->hardware) {
		ret = -ENOMEM;
		goto exit;
	}

	priv->capability = rockchip_crypto_capability(dev);
exit:
	rk_crypto_disable_clk(dev);

	return ret;
}

static const struct udevice_id rockchip_crypto_ids[] = {
	{
		.compatible = "rockchip,crypto-ce",
	},
	{ }
};

U_BOOT_DRIVER(rockchip_crypto_ce) = {
	.name		= "rockchip_crypto_ce",
	.id		= UCLASS_CRYPTO,
	.of_match	= rockchip_crypto_ids,
	.ops		= &rockchip_crypto_ops,
	.probe		= rockchip_crypto_probe,
	.ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv),
};