xref: /rk3399_rockchip-uboot/drivers/crypto/rockchip/crypto_ce.c (revision 8f7f431fc3314b4d44fc6ab0ecd2b6ae4ecaa9b9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2025 Rockchip Electronics Co., Ltd
4  */
5 
6 #include <clk.h>
7 #include <clk-uclass.h>
8 #include <common.h>
9 #include <crypto.h>
10 #include <dm.h>
11 #include <asm/io.h>
12 #include <asm/arch/hardware.h>
13 #include <asm/arch/clock.h>
14 #include <rockchip/crypto_ecc.h>
15 #include <rockchip/crypto_v2_pka.h>
16 #include <rockchip/rkce_core.h>
17 
/* Base address of the crypto register block; read by the PKA/ECC helpers. */
fdt_addr_t crypto_base;

#define ROUNDUP(size, alignment)	round_up(size, alignment)

/* Timeouts (milliseconds) for synchronous task-descriptor completion. */
#define RKCE_HASH_TIMEOUT_MS	1000
#define RKCE_SYMM_TIMEOUT_MS	1000
24 
/*
 * Per-session hash state: running input byte count plus the hardware
 * task descriptor (td) and its DMA buffer (td_buf) in CMA memory.
 */
struct rkce_sha_contex {
	u32				length;	/* total bytes fed so far */
	struct rkce_hash_td_ctrl	ctrl;	/* template copied into each td */
	struct rkce_hash_td		*td;
	struct rkce_hash_td_buf		*td_buf;
};
31 
/*
 * Per-operation cipher state: one descriptor for the payload, one for
 * the AEAD additional data, plus the shared descriptor buffer.
 */
struct rkce_cipher_contex {
	struct rkce_symm_td		*td;
	struct rkce_symm_td		*td_aad;
	struct rkce_symm_td_buf		*td_buf;
};
37 
/* Driver-private state, allocated per device by the DM core. */
struct rockchip_crypto_priv {
	fdt_addr_t			reg;		/* register base from "reg" */
	u32				frequency;
	char				*clocks;	/* raw "clocks" cells */
	u32				*frequencies;	/* "clock-frequency" values */
	u32				nclocks;
	u32				freq_nclocks;
	u32				capability;	/* CRYPTO_* mask, cached */

	void				*hardware;	/* rkce core handle */
	struct rkce_sha_contex		*hash_ctx;	/* active hash session or NULL */
};
50 
/* One entry mapping a framework CRYPTO_* id to the RKCE hardware id. */
struct rockchip_map {
	u32				crypto;
	u32				rkce;
};
55 
/* Plain-hash algorithm translation table. */
static const struct rockchip_map rk_hash_map[] = {
	{CRYPTO_SM3,         RKCE_HASH_ALGO_SM3},
	{CRYPTO_MD5,         RKCE_HASH_ALGO_MD5},
	{CRYPTO_SHA1,        RKCE_HASH_ALGO_SHA1},
	{CRYPTO_SHA256,      RKCE_HASH_ALGO_SHA256},
	{CRYPTO_SHA512,      RKCE_HASH_ALGO_SHA512},
};
63 
64 #if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
/* HMAC algorithm translation table (hardware ids reuse the hash ids). */
static const struct rockchip_map rk_hmac_map[] = {
	{CRYPTO_HMAC_MD5,    RKCE_HASH_ALGO_MD5},
	{CRYPTO_HMAC_SHA1,   RKCE_HASH_ALGO_SHA1},
	{CRYPTO_HMAC_SHA256, RKCE_HASH_ALGO_SHA256},
	{CRYPTO_HMAC_SHA512, RKCE_HASH_ALGO_SHA512},
	{CRYPTO_HMAC_SM3,    RKCE_HASH_ALGO_SM3},
};
72 #endif
73 
74 #if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
/* Symmetric-cipher algorithm translation table. */
static const struct rockchip_map rk_cipher_map[] = {
	{CRYPTO_AES,         RKCE_SYMM_ALGO_AES},
	{CRYPTO_DES,         RKCE_SYMM_ALGO_TDES},
	{CRYPTO_SM4,         RKCE_SYMM_ALGO_SM4},
};
80 #endif
81 
82 #if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
/* All RSA key sizes map onto the single RKCE RSA engine. */
static const struct rockchip_map rk_rsa_map[] = {
	{CRYPTO_RSA512,       RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA1024,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA2048,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA3072,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA4096,      RKCE_ASYM_ALGO_RSA},
};
90 #endif
91 
92 #if CONFIG_IS_ENABLED(ROCKCHIP_EC)
/* Elliptic-curve algorithm translation table. */
static const struct rockchip_map rk_ec_map[] = {
	{CRYPTO_SM2,          RKCE_ASYM_ALGO_SM2},
	{CRYPTO_ECC_192R1,    RKCE_ASYM_ALGO_ECC_P192},
	{CRYPTO_ECC_224R1,    RKCE_ASYM_ALGO_ECC_P224},
	{CRYPTO_ECC_256R1,    RKCE_ASYM_ALGO_ECC_P256},
};
99 #endif
100 
/* Forward declarations: clock-gating helpers are defined near the bottom. */
static int rk_crypto_enable_clk(struct udevice *dev);
static int rk_crypto_disable_clk(struct udevice *dev);
103 
/*
 * Pulse the selected soft-reset line of the crypto engine.
 * A no-op before probe has created the hardware handle; clocks are
 * gated on only around the register access.
 */
static void rk_crypto_soft_reset(struct udevice *dev, uint32_t reset_sel)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

	if (!priv->hardware)
		return;

	rk_crypto_enable_clk(dev);

	rkce_soft_reset(priv->hardware, reset_sel);

	rk_crypto_disable_clk(dev);
}
117 
118 static void crypto_flush_cacheline(ulong addr, ulong size)
119 {
120 	ulong alignment = CONFIG_SYS_CACHELINE_SIZE;
121 	ulong aligned_input, aligned_len;
122 
123 	if (!addr || !size)
124 		return;
125 
126 	/* Must flush dcache before crypto DMA fetch data region */
127 	aligned_input = round_down(addr, alignment);
128 	aligned_len = round_up(size + (addr - aligned_input), alignment);
129 	flush_cache(aligned_input, aligned_len);
130 }
131 
132 static u32 rk_get_cemode(const struct rockchip_map *map, u32 num, u32 algo)
133 {
134 	u32 i, j;
135 	struct {
136 		const struct rockchip_map	*map;
137 		u32				num;
138 	} map_tbl[] = {
139 		{rk_hash_map, ARRAY_SIZE(rk_hash_map)},
140 #if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
141 		{rk_hmac_map, ARRAY_SIZE(rk_hmac_map)},
142 #endif
143 #if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
144 		{rk_cipher_map, ARRAY_SIZE(rk_cipher_map)},
145 #endif
146 #if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
147 		{rk_rsa_map, ARRAY_SIZE(rk_rsa_map)},
148 #endif
149 #if CONFIG_IS_ENABLED(ROCKCHIP_EC)
150 		{rk_ec_map, ARRAY_SIZE(rk_ec_map)},
151 #endif
152 	};
153 
154 	for (i = 0; i < ARRAY_SIZE(map_tbl); i++) {
155 		const struct rockchip_map *map = map_tbl[i].map;
156 		u32 num = map_tbl[i].num;
157 
158 		for (j = 0; j < num; j++) {
159 			if (map[j].crypto == algo)
160 				return map[j].rkce;
161 		}
162 	}
163 
164 	return 0;
165 }
166 
167 static u32 rk_load_map(struct rockchip_crypto_priv *priv, u32 algo_type,
168 		       const struct rockchip_map *map, u32 num)
169 {
170 	u32 i;
171 	u32 capability = 0;
172 
173 	for (i = 0; i < num; i++) {
174 		if (rkce_hw_algo_valid(priv->hardware, algo_type, map[i].rkce, 0))
175 			capability |= map[i].crypto;
176 	}
177 
178 	return capability;
179 }
180 
/*
 * Report the CRYPTO_* capability mask.  Probes every compiled-in
 * algorithm family against the hardware on first use; probe() caches
 * the result in priv->capability, which is returned directly afterwards.
 */
static u32 rockchip_crypto_capability(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 cap = 0;

	/* cached by probe() after the first full scan */
	if (priv->capability)
		return priv->capability;

	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_HASH,
			   rk_hash_map, ARRAY_SIZE(rk_hash_map));

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_HMAC,
			   rk_hmac_map, ARRAY_SIZE(rk_hmac_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_CIPHER,
			   rk_cipher_map, ARRAY_SIZE(rk_cipher_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_ASYM, rk_rsa_map,
			   ARRAY_SIZE(rk_rsa_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_ASYM, rk_ec_map,
			   ARRAY_SIZE(rk_ec_map));
#endif

	return cap;
}
214 
215 static void *rkce_sha_ctx_alloc(void)
216 {
217 	struct rkce_sha_contex *hw_ctx;
218 
219 	hw_ctx = malloc(sizeof(*hw_ctx));
220 	if (!hw_ctx)
221 		return NULL;
222 
223 	memset(hw_ctx, 0x00, sizeof(*hw_ctx));
224 
225 	hw_ctx->td = rkce_cma_alloc(sizeof(struct rkce_hash_td));
226 	if (!hw_ctx->td)
227 		goto error;
228 
229 	memset(hw_ctx->td, 0x00, sizeof(struct rkce_hash_td));
230 
231 	hw_ctx->td_buf = rkce_cma_alloc(sizeof(struct rkce_hash_td_buf));
232 	if (!hw_ctx->td_buf)
233 		goto error;
234 
235 	memset(hw_ctx->td_buf, 0x00, sizeof(struct rkce_hash_td_buf));
236 
237 	return hw_ctx;
238 error:
239 	rkce_cma_free(hw_ctx->td);
240 	rkce_cma_free(hw_ctx->td_buf);
241 	free(hw_ctx);
242 
243 	return NULL;
244 }
245 
246 static void rkce_sha_ctx_free(struct rkce_sha_contex *hw_ctx)
247 {
248 	if (!hw_ctx)
249 		return;
250 
251 	rkce_cma_free(hw_ctx->td);
252 	rkce_cma_free(hw_ctx->td_buf);
253 	free(hw_ctx);
254 }
255 
256 static int rk_sha_init(struct udevice *dev, sha_context *ctx,
257 		       u8 *key, u32 key_len, bool is_hmac)
258 {
259 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
260 	struct rkce_sha_contex *hash_ctx = NULL;
261 	u32 ce_algo = 0;
262 	int ret = 0;
263 
264 	if ((ctx->algo & priv->capability) == 0)
265 		return -ENOSYS;
266 
267 	if (priv->hash_ctx)
268 		return -EFAULT;
269 
270 	rk_crypto_soft_reset(dev, RKCE_RESET_HASH);
271 
272 	hash_ctx = rkce_sha_ctx_alloc();
273 	if (!hash_ctx)
274 		return -ENOMEM;
275 
276 	ret = rkce_init_hash_td(hash_ctx->td, hash_ctx->td_buf);
277 	if (ret)
278 		goto exit;
279 
280 	ce_algo = rk_get_cemode(rk_hash_map, ARRAY_SIZE(rk_hash_map), ctx->algo);
281 
282 	hash_ctx->ctrl.td_type        = RKCE_TD_TYPE_HASH;
283 	hash_ctx->ctrl.hw_pad_en      = 1;
284 	hash_ctx->ctrl.first_pkg      = 1;
285 	hash_ctx->ctrl.last_pkg       = 0;
286 	hash_ctx->ctrl.hash_algo      = ce_algo;
287 	hash_ctx->ctrl.hmac_en        = is_hmac;
288 	hash_ctx->ctrl.is_preemptible = 0;
289 	hash_ctx->ctrl.int_en         = 1;
290 
291 	if (is_hmac) {
292 		if (key_len > 64) {
293 			ret = -EINVAL;
294 			goto exit;
295 		}
296 
297 		memcpy(hash_ctx->td_buf->key, key, key_len);
298 	}
299 
300 	priv->hash_ctx = hash_ctx;
301 exit:
302 	if (ret) {
303 		rkce_sha_ctx_free(hash_ctx);
304 		priv->hash_ctx = NULL;
305 	}
306 
307 	return ret;
308 }
309 
310 static int rk_sha_update(struct udevice *dev, u32 *input, u32 len, bool is_last)
311 {
312 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
313 	struct rkce_sha_contex *hash_ctx;
314 	struct rkce_hash_td *td;
315 	int ret = 0;
316 
317 	if (!priv->hash_ctx)
318 		return -EINVAL;
319 
320 	if (!is_last && (!input || len == 0))
321 		return -EINVAL;
322 
323 	hash_ctx = priv->hash_ctx;
324 	td = hash_ctx->td;
325 
326 	td->ctrl = hash_ctx->ctrl;
327 	memset(&td->sg, 0x00, sizeof(*td->sg));
328 
329 	if (hash_ctx->ctrl.first_pkg == 1)
330 		hash_ctx->ctrl.first_pkg = 0;
331 
332 	if (is_last) {
333 		td->ctrl.last_pkg = 1;
334 	} else {
335 #ifdef CONFIG_ARM64
336 		td->sg[0].src_addr_h = rkce_cma_virt2phys(input) >> 32;
337 #endif
338 		td->sg[0].src_addr_l = rkce_cma_virt2phys(input) & 0xffffffff;
339 		td->sg[0].src_size   = len;
340 		hash_ctx->length += len;
341 		crypto_flush_cacheline((ulong)input, len);
342 	}
343 
344 	rk_crypto_enable_clk(dev);
345 
346 	crypto_flush_cacheline((ulong)hash_ctx->td, sizeof(*hash_ctx->td));
347 	crypto_flush_cacheline((ulong)hash_ctx->td_buf, sizeof(*hash_ctx->td_buf));
348 
349 	ret = rkce_push_td_sync(priv->hardware, td, RKCE_HASH_TIMEOUT_MS);
350 	if (ret) {
351 		rkce_sha_ctx_free(hash_ctx);
352 		priv->hash_ctx = NULL;
353 	}
354 
355 	rk_crypto_disable_clk(dev);
356 
357 	return ret;
358 }
359 
/* UCLASS_CRYPTO hook: open a plain (non-HMAC) hash session. */
static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
{
	return rk_sha_init(dev, ctx, NULL, 0, false);
}
364 
/* UCLASS_CRYPTO hook: feed one non-final data chunk to the open session. */
static int rockchip_crypto_sha_update(struct udevice *dev, u32 *input, u32 len)
{
	return rk_sha_update(dev, input, len, false);
}
369 
370 static int rockchip_crypto_sha_final(struct udevice *dev, sha_context *ctx, u8 *output)
371 {
372 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
373 	struct rkce_sha_contex *hash_ctx = priv->hash_ctx;
374 	u32 nbits;
375 	int ret;
376 
377 	if (!priv->hash_ctx)
378 		return -EINVAL;
379 
380 	nbits = crypto_algo_nbits(ctx->algo);
381 
382 	if (hash_ctx->length != ctx->length) {
383 		printf("total length(0x%08x) != init length(0x%08x)!\n",
384 		       hash_ctx->length, ctx->length);
385 		ret = -EIO;
386 		goto exit;
387 	}
388 
389 	ret = rk_sha_update(dev, NULL, 0, true);
390 	if (ret == 0)
391 		memcpy(output, hash_ctx->td_buf->hash, BITS2BYTE(nbits));
392 
393 exit:
394 	rkce_sha_ctx_free(hash_ctx);
395 	priv->hash_ctx = NULL;
396 
397 	return ret;
398 }
399 
400 #if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
/* UCLASS_CRYPTO hook: open an HMAC session with the given key. */
static int rockchip_crypto_hmac_init(struct udevice *dev,  sha_context *ctx, u8 *key, u32 key_len)
{
	return rk_sha_init(dev, ctx, key, key_len, true);
}
405 
/* HMAC update shares the hash data path. */
static int rockchip_crypto_hmac_update(struct udevice *dev, u32 *input, u32 len)
{
	return rockchip_crypto_sha_update(dev, input, len);
}
410 
/* HMAC finalisation shares the hash data path. */
static int rockchip_crypto_hmac_final(struct udevice *dev, sha_context *ctx, u8 *output)
{
	return rockchip_crypto_sha_final(dev, ctx, output);
}
415 #endif
416 
417 #if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
418 
419 static int hw_crypto_ccm128_setiv(u8 *iv_buf, u8 *nonce, u32 nlen, u32 mlen)
420 {
421 	u32 L = iv_buf[0] & 7;	/* the L parameter */
422 
423 	if (nlen < (14 - L))
424 		return -1;	/* nonce is too short */
425 
426 	if (sizeof(mlen) == 8 && L >= 3) {
427 		iv_buf[8]  = mlen >> (56 % (sizeof(mlen) * 8));
428 		iv_buf[9]  = mlen >> (48 % (sizeof(mlen) * 8));
429 		iv_buf[10] = mlen >> (40 % (sizeof(mlen) * 8));
430 		iv_buf[11] = mlen >> (32 % (sizeof(mlen) * 8));
431 	}
432 
433 	iv_buf[12] = mlen >> 24;
434 	iv_buf[13] = mlen >> 16;
435 	iv_buf[14] = mlen >> 8;
436 	iv_buf[15] = mlen;
437 
438 	iv_buf[0] &= ~0x40;	/* clear aad flag */
439 	memcpy(&iv_buf[1], nonce, 14 - L);
440 
441 	return 0;
442 }
443 
444 static void hw_get_ccm_aad_padding(u32 aad_len, u8 *padding,  u32 *padding_size)
445 {
446 	u32 i = 0;
447 
448 	if (aad_len == 0) {
449 		*padding_size = 0;
450 		return;
451 	}
452 
453 	if (aad_len < (0x10000 - 0x100)) {
454 		i = 2;
455 	} else if (sizeof(aad_len) == 8 &&
456 		   aad_len >= (size_t)1 << (32 % (sizeof(aad_len) * 8))) {
457 		i = 10;
458 	} else {
459 		i = 6;
460 	}
461 
462 	if (i == 2) {
463 		padding[0] = aad_len >> 8;
464 		padding[1] = aad_len;
465 	} else if (i == 10) {
466 		padding[0] = 0xFF;
467 		padding[1] = 0xFF;
468 		padding[2] = aad_len >> (56 % (sizeof(aad_len) * 8));
469 		padding[3] = aad_len >> (48 % (sizeof(aad_len) * 8));
470 		padding[4] = aad_len >> (40 % (sizeof(aad_len) * 8));
471 		padding[5] = aad_len >> (32 % (sizeof(aad_len) * 8));
472 		padding[6] = aad_len >> 24;
473 		padding[7] = aad_len >> 16;
474 		padding[8] = aad_len >> 8;
475 		padding[9] = aad_len;
476 	} else {
477 		padding[0] = 0xFF;
478 		padding[1] = 0xFE;
479 		padding[2] = aad_len >> 24;
480 		padding[3] = aad_len >> 16;
481 		padding[4] = aad_len >> 8;
482 	}
483 
484 	*padding_size = i;
485 }
486 
487 static int hw_compose_ccm_aad_iv(u8 *aad_iv, u32 data_len,
488 				 u32 aad_len, u32 tag_size)
489 {
490 	u32 L;		/* the L parameter */
491 	u8 nonce[AES_BLOCK_SIZE];
492 
493 	L = aad_iv[0] & 7;
494 	aad_iv[0] |= ((u8)(((tag_size - 2) / 2) & 7) << 3);
495 
496 	if (sizeof(data_len) == 8 && L >= 3) {
497 		aad_iv[8]  = data_len >> (56 % (sizeof(data_len) * 8));
498 		aad_iv[9]  = data_len >> (48 % (sizeof(data_len) * 8));
499 		aad_iv[10] = data_len >> (40 % (sizeof(data_len) * 8));
500 		aad_iv[11] = data_len >> (32 % (sizeof(data_len) * 8));
501 	}
502 
503 	/* save nonce */
504 	memcpy(nonce, &aad_iv[1], 14 - L);
505 
506 	aad_iv[12] = data_len >> 24;
507 	aad_iv[13] = data_len >> 16;
508 	aad_iv[14] = data_len >> 8;
509 	aad_iv[15] = data_len;
510 
511 	/* restore nonce */
512 	memcpy(&aad_iv[1], nonce, 14 - L);
513 
514 	aad_iv[0] &= ~0x40;	/* clear Adata flag */
515 
516 	if (aad_len)
517 		aad_iv[0] |= 0x40;	//set aad flag
518 
519 	return 0;
520 }
521 
/* Release an AAD buffer produced by rkce_build_ccm_aad(). */
static void rkce_destroy_ccm_aad(u8 *new_aad)
{
	rkce_cma_free(new_aad);
}
526 
/*
 * Build the hardware-shaped CCM AAD stream: B0 block + a-length prefix
 * + caller AAD, rounded up to a whole AES block, in a fresh CMA buffer.
 * Also rewrites the IV into the CCM counter form.
 *
 * On success *new_aad owns a CMA allocation the caller must release
 * with rkce_destroy_ccm_aad().  @iv and @new_iv may alias (the cipher
 * path passes td_buf->iv for both); the nonce is staged through a local
 * buffer so that is safe.
 *
 * Returns 0 on success, -RKCE_INVAL/-RKCE_NOMEM on failure.
 */
static int rkce_build_ccm_aad(const u8 *aad, u32 aad_len, u32 data_len,
			      u8 *iv, u32 iv_len,
			      u8 **new_aad, u32 *new_aad_len,
			      u8 *new_iv, u32 *new_iv_len)
{
	int ret = -RKCE_INVAL;
	u32 L;
	u8 nonce[AES_BLOCK_SIZE];
	u8 pad[AES_BLOCK_SIZE];
	u32 pad_size = 0;
	u32 tag_len = AES_BLOCK_SIZE;
	u8 *aad_tmp = NULL;
	u32 aad_tmp_len = 0;

	memset(nonce, 0x00, sizeof(nonce));

	/* CCM: L = 15 - nonce length; flags byte carries L - 1 */
	L = 15 - iv_len;
	nonce[0] = (L - 1) & 7;
	ret = hw_crypto_ccm128_setiv(nonce, (u8 *)iv, iv_len, 0);
	if (ret)
		return ret;

	memcpy(new_iv, nonce, sizeof(nonce));
	*new_iv_len = sizeof(nonce);

	memset(pad, 0x00, sizeof(pad));
	hw_get_ccm_aad_padding(aad_len, pad, &pad_size);

	/* B0 block + length prefix + AAD, rounded to a full block */
	aad_tmp_len = aad_len + AES_BLOCK_SIZE + pad_size;
	aad_tmp_len = ROUNDUP(aad_tmp_len, AES_BLOCK_SIZE);

	aad_tmp = rkce_cma_alloc(aad_tmp_len);
	if (!aad_tmp) {
		ret = -RKCE_NOMEM;
		goto exit;
	}

	/* clear last block */
	memset(aad_tmp + aad_tmp_len - AES_BLOCK_SIZE, 0x00, AES_BLOCK_SIZE);
	memcpy(aad_tmp, nonce, sizeof(nonce));
	hw_compose_ccm_aad_iv(aad_tmp, data_len, aad_len, tag_len);
	memcpy(aad_tmp + AES_BLOCK_SIZE, pad, pad_size);

	memcpy(aad_tmp + AES_BLOCK_SIZE + pad_size, aad, aad_len);

	*new_aad     = aad_tmp;
	*new_aad_len = aad_tmp_len;

exit:
	return ret;
}
578 
579 static void *rkce_cipher_ctx_alloc(void)
580 {
581 	struct rkce_cipher_contex *hw_ctx;
582 
583 	hw_ctx = malloc(sizeof(*hw_ctx));
584 	if (!hw_ctx)
585 		return NULL;
586 
587 	hw_ctx->td = rkce_cma_alloc(sizeof(struct rkce_symm_td));
588 	if (!hw_ctx->td)
589 		goto error;
590 
591 	memset(hw_ctx->td, 0x00, sizeof(struct rkce_symm_td));
592 
593 	hw_ctx->td_aad = rkce_cma_alloc(sizeof(struct rkce_symm_td));
594 	if (!hw_ctx->td_aad)
595 		goto error;
596 
597 	memset(hw_ctx->td_aad, 0x00, sizeof(struct rkce_symm_td));
598 
599 	hw_ctx->td_buf = rkce_cma_alloc(sizeof(struct rkce_symm_td_buf));
600 	if (!hw_ctx->td_buf)
601 		goto error;
602 
603 	memset(hw_ctx->td_buf, 0x00, sizeof(struct rkce_symm_td_buf));
604 
605 	return hw_ctx;
606 error:
607 	rkce_cma_free(hw_ctx->td);
608 	rkce_cma_free(hw_ctx->td_aad);
609 	rkce_cma_free(hw_ctx->td_buf);
610 	free(hw_ctx);
611 
612 	return NULL;
613 }
614 
615 static void rkce_cipher_ctx_free(struct rkce_cipher_contex *hw_ctx)
616 {
617 	if (!hw_ctx)
618 		return;
619 
620 	rkce_cma_free(hw_ctx->td);
621 	rkce_cma_free(hw_ctx->td_aad);
622 	rkce_cma_free(hw_ctx->td_buf);
623 	free(hw_ctx);
624 }
625 
626 static void crypto_invalidate_cacheline(u32 addr, u32 size)
627 {
628 	ulong alignment = CONFIG_SYS_CACHELINE_SIZE;
629 	ulong aligned_input, aligned_len;
630 
631 	if (!addr || !size)
632 		return;
633 
634 	/* Must invalidate dcache after crypto DMA write data region */
635 	aligned_input = round_down(addr, alignment);
636 	aligned_len = round_up(size + (addr - aligned_input), alignment);
637 	invalidate_dcache_range(aligned_input, aligned_input + aligned_len);
638 }
639 
/* Block-cipher mode translation table (framework RK_MODE_* -> RKCE id). */
static const struct rockchip_map rk_cipher_algo_map[] = {
	{RK_MODE_ECB,     RKCE_SYMM_MODE_ECB},
	{RK_MODE_CBC,     RKCE_SYMM_MODE_CBC},
	{RK_MODE_CTS,     RKCE_SYMM_MODE_CTS},
	{RK_MODE_CTR,     RKCE_SYMM_MODE_CTR},
	{RK_MODE_CFB,     RKCE_SYMM_MODE_CFB},
	{RK_MODE_OFB,     RKCE_SYMM_MODE_OFB},
	{RK_MODE_XTS,     RKCE_SYMM_MODE_XTS},
	{RK_MODE_CCM,     RKCE_SYMM_MODE_CCM},
	{RK_MODE_GCM,     RKCE_SYMM_MODE_GCM},
	{RK_MODE_CMAC,    RKCE_SYMM_MODE_CMAC},
	{RK_MODE_CBC_MAC, RKCE_SYMM_MODE_CBC_MAC},
};
653 
654 static int rk_get_cipher_cemode(u32 algo, u32 mode, u32 *ce_algo, u32 *ce_mode)
655 {
656 	u32 i;
657 
658 	switch (algo) {
659 	case CRYPTO_DES:
660 		*ce_algo = RKCE_SYMM_ALGO_TDES;
661 		break;
662 	case CRYPTO_AES:
663 		*ce_algo = RKCE_SYMM_ALGO_AES;
664 		break;
665 	case CRYPTO_SM4:
666 		*ce_algo = RKCE_SYMM_ALGO_SM4;
667 		break;
668 	default:
669 		return -EINVAL;
670 	}
671 
672 	for (i = 0; i < ARRAY_SIZE(rk_cipher_algo_map); i++) {
673 		if (mode == rk_cipher_algo_map[i].crypto) {
674 			*ce_mode = rk_cipher_algo_map[i].rkce;
675 			return 0;
676 		}
677 	}
678 
679 	return -EINVAL;
680 }
681 
682 u32 rk_get_td_keysize(u32 ce_algo, u32 ce_mode,  u32 key_len)
683 {
684 	u32 key_size = 0;
685 
686 	if (ce_algo == RKCE_SYMM_ALGO_AES) {
687 		if (key_len == AES_KEYSIZE_128)
688 			key_size = RKCE_KEY_AES_128;
689 		else if (key_len == AES_KEYSIZE_192)
690 			key_size = RKCE_KEY_AES_192;
691 		else if (key_len == AES_KEYSIZE_256)
692 			key_size = RKCE_KEY_AES_256;
693 		else
694 			;
695 	}
696 
697 	return key_size;
698 }
699 
700 int rk_set_symm_td_buf_key(struct rkce_symm_td_buf *td_buf,
701 			   u32 ce_algo, u32 ce_mode, cipher_context *ctx)
702 {
703 	memset(td_buf->key1, 0x00, sizeof(td_buf->key1));
704 	memset(td_buf->key2, 0x00, sizeof(td_buf->key2));
705 
706 	if (ce_mode == RKCE_SYMM_MODE_XTS) {
707 		memcpy(td_buf->key1, ctx->key, ctx->key_len);
708 		memcpy(td_buf->key2, ctx->twk_key, ctx->key_len);
709 	} else {
710 		memcpy(td_buf->key1, ctx->key, ctx->key_len);
711 	}
712 
713 	if (ctx->key_len == DES_KEYSIZE * 2 &&
714 	    (ce_algo == RKCE_SYMM_ALGO_DES || ce_algo == RKCE_SYMM_ALGO_TDES))
715 		memcpy(td_buf->key1 + DES_KEYSIZE * 2, td_buf->key1, DES_KEYSIZE);
716 
717 	return 0;
718 }
719 
720 int rk_set_symm_td_sg(struct rkce_symm_td *td,
721 		      const u8 *in, u32 in_len, u8 *out, u32 out_len)
722 {
723 	memset(td->sg, 0x00, sizeof(td->sg));
724 
725 #ifdef CONFIG_ARM64
726 	td->sg[0].src_addr_h = rkce_cma_virt2phys(in) >> 32;
727 #endif
728 	td->sg[0].src_addr_l = rkce_cma_virt2phys(in) & 0xffffffff;
729 	td->sg[0].src_size   = in_len;
730 
731 	if (out && out_len) {
732 #ifdef CONFIG_ARM64
733 		td->sg[0].dst_addr_h = rkce_cma_virt2phys(out) >> 32;
734 #endif
735 		td->sg[0].dst_addr_l = rkce_cma_virt2phys(out) & 0xffffffff;
736 		td->sg[0].dst_size   = out_len;
737 	}
738 
739 	td->next_task = 0;
740 
741 	return 0;
742 }
743 
/*
 * Core symmetric path shared by plain cipher, MAC and AEAD entries.
 * Runs a single one-shot operation (first_pkg == last_pkg == 1):
 * builds the task descriptor(s), pre-runs a separate AAD descriptor for
 * CCM/GCM, then pushes the payload descriptor synchronously.
 *
 * @ctx:     key/iv/mode parameters; a NULL key with non-zero key_len
 *           selects the OTP key table instead of a user key
 * @in/@out: payload buffers (@out may be NULL for MAC modes)
 * @enc:     true to encrypt, false to decrypt
 * @aad:     AEAD additional data (CCM/GCM only)
 * @tag:     receives the authentication tag when non-NULL
 */
static int rk_crypto_cipher(struct udevice *dev, cipher_context *ctx,
			    const u8 *in, u8 *out, u32 len, bool enc,
			    const u8 *aad, u32 aad_len, u8 *tag)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_cipher_contex *hw_ctx = NULL;
	u32 ce_algo = 0, ce_mode = 0;
	bool use_otpkey = false;
	int ret = 0;

	rk_crypto_soft_reset(dev, RKCE_RESET_SYMM);

	/* NULL key + non-zero length: use the hardware key table */
	if (!ctx->key && ctx->key_len)
		use_otpkey = true;

	ret = rk_get_cipher_cemode(ctx->algo, ctx->mode, &ce_algo, &ce_mode);
	if (ret)
		return ret;

	hw_ctx = rkce_cipher_ctx_alloc();
	if (!hw_ctx)
		return -ENOMEM;

	rkce_init_symm_td(hw_ctx->td, hw_ctx->td_buf);

	hw_ctx->td->ctrl.td_type   = RKCE_TD_TYPE_SYMM;
	hw_ctx->td->ctrl.is_dec    = !enc;
	hw_ctx->td->ctrl.symm_algo = ce_algo;
	hw_ctx->td->ctrl.symm_mode = ce_mode;
	hw_ctx->td->ctrl.key_size  = rk_get_td_keysize(ce_algo, ce_mode, ctx->key_len);
	hw_ctx->td->ctrl.first_pkg = 1;
	hw_ctx->td->ctrl.last_pkg  = 1;
	hw_ctx->td->ctrl.int_en    = 1;
	hw_ctx->td->ctrl.key_sel   = use_otpkey ? RKCE_KEY_SEL_KT : RKCE_KEY_SEL_USER;

	memcpy(hw_ctx->td_buf->iv, ctx->iv, ctx->iv_len);
	hw_ctx->td->ctrl.iv_len    = ctx->iv_len;

	if (!use_otpkey) {
		ret = rk_set_symm_td_buf_key(hw_ctx->td_buf, ce_algo, ce_mode, ctx);
		if (ret)
			goto exit;
	}

	ret = rk_set_symm_td_sg(hw_ctx->td, in, len, out, len);
	if (ret)
		goto exit;

	if (ce_mode == RKCE_SYMM_MODE_CCM) {
		u8 *new_aad = NULL;
		u32 new_aad_len = 0, new_iv_len = 0;

		rkce_init_symm_td(hw_ctx->td_aad, hw_ctx->td_buf);

		/*
		 * Rewrite the IV into CCM counter form and build the
		 * B0 + length-prefixed AAD stream; td_buf->iv is passed
		 * as both source and destination IV (safe, see helper).
		 */
		ret = rkce_build_ccm_aad(aad, aad_len, len,
					 hw_ctx->td_buf->iv, ctx->iv_len,
					 &new_aad, &new_aad_len,
					 hw_ctx->td_buf->iv, &new_iv_len);
		if (ret)
			goto exit;

		ret = rk_set_symm_td_sg(hw_ctx->td_aad, new_aad, new_aad_len, NULL, 0);
		if (ret)
			goto exit;

		hw_ctx->td->ctrl.iv_len = new_iv_len;

		hw_ctx->td_buf->gcm_len.aad_len_l = new_aad_len;

		/* AAD descriptor mirrors the payload ctrl, flagged is_aad */
		hw_ctx->td_aad->ctrl = hw_ctx->td->ctrl;
		hw_ctx->td_aad->ctrl.is_aad = 1;

		crypto_flush_cacheline((ulong)hw_ctx->td_aad, sizeof(*hw_ctx->td_aad));
		crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
		crypto_flush_cacheline((ulong)new_aad, new_aad_len);

		rk_crypto_enable_clk(dev);

		ret = rkce_push_td_sync(priv->hardware, hw_ctx->td_aad, RKCE_SYMM_TIMEOUT_MS);

		rk_crypto_disable_clk(dev);

		rkce_destroy_ccm_aad(new_aad);

		if (ret) {
			printf("CCM calc aad data failed.\n");
			goto exit;
		}
	} else if (ce_mode == RKCE_SYMM_MODE_GCM) {
		rkce_init_symm_td(hw_ctx->td_aad, hw_ctx->td_buf);

		ret = rk_set_symm_td_sg(hw_ctx->td_aad, aad, aad_len, NULL, 0);
		if (ret)
			goto exit;

		hw_ctx->td_buf->gcm_len.aad_len_l = aad_len;
		hw_ctx->td_buf->gcm_len.pc_len_l = len;

		hw_ctx->td_aad->ctrl = hw_ctx->td->ctrl;
		hw_ctx->td_aad->ctrl.is_aad = 1;

		crypto_flush_cacheline((ulong)hw_ctx->td_aad, sizeof(*hw_ctx->td_aad));
		crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
		crypto_flush_cacheline((ulong)aad, aad_len);

		rk_crypto_enable_clk(dev);

		ret = rkce_push_td_sync(priv->hardware, hw_ctx->td_aad, RKCE_SYMM_TIMEOUT_MS);

		rk_crypto_disable_clk(dev);
		if (ret) {
			printf("GCM calc aad data failed.\n");
			goto exit;
		}
	}

	/* flush descriptors and input; invalidate output around the DMA */
	crypto_flush_cacheline((ulong)hw_ctx->td, sizeof(*hw_ctx->td));
	crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
	crypto_flush_cacheline((ulong)in, len);
	crypto_invalidate_cacheline((ulong)out, len);

	rk_crypto_enable_clk(dev);

	ret = rkce_push_td_sync(priv->hardware, hw_ctx->td, RKCE_SYMM_TIMEOUT_MS);

	crypto_invalidate_cacheline((ulong)out, len);

	rk_crypto_disable_clk(dev);

	if (tag)
		memcpy(tag, hw_ctx->td_buf->tag, sizeof(hw_ctx->td_buf->tag));
exit:
	rkce_cipher_ctx_free(hw_ctx);

	return ret;
}
880 
/* UCLASS_CRYPTO hook: plain (non-AEAD, non-MAC) cipher operation. */
static int rockchip_crypto_cipher(struct udevice *dev, cipher_context *ctx,
				  const u8 *in, u8 *out, u32 len, bool enc)
{
	return rk_crypto_cipher(dev, ctx, in, out, len, enc, NULL, 0, NULL);
}
885 }
886 
/* UCLASS_CRYPTO hook: MAC computation (no ciphertext output, tag only). */
static int rockchip_crypto_mac(struct udevice *dev, cipher_context *ctx,
			       const u8 *in, u32 len, u8 *tag)
{
	return rk_crypto_cipher(dev, ctx, in, NULL, len, true, NULL, 0, tag);
}
892 
/* UCLASS_CRYPTO hook: authenticated encryption (CCM/GCM) with AAD. */
static int rockchip_crypto_ae(struct udevice *dev, cipher_context *ctx,
			      const u8 *in, u32 len, const u8 *aad, u32 aad_len,
			      u8 *out, u8 *tag)
{
	return rk_crypto_cipher(dev, ctx, in, out, len, true, aad, aad_len, tag);
}
899 
900 int rockchip_crypto_fw_cipher(struct udevice *dev, cipher_fw_context *ctx,
901 			      const u8 *in, u8 *out, u32 len, bool enc)
902 {
903 	cipher_context cipher_ctx;
904 
905 	memset(&cipher_ctx, 0x00, sizeof(cipher_ctx));
906 
907 	cipher_ctx.algo    = ctx->algo;
908 	cipher_ctx.mode    = ctx->mode;
909 	cipher_ctx.key_len = ctx->key_len;
910 	cipher_ctx.iv      = ctx->iv;
911 	cipher_ctx.iv_len  = ctx->iv_len;
912 
913 	return rk_crypto_cipher(dev, &cipher_ctx, in, out, len, enc, NULL, 0, NULL);
914 }
915 
916 static ulong rockchip_get_keytable_addr(struct udevice *dev)
917 {
918 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
919 	ulong addr;
920 
921 	rk_crypto_enable_clk(dev);
922 
923 	addr = rkce_get_keytable_addr(priv->hardware);
924 
925 	rk_crypto_disable_clk(dev);
926 
927 	return addr;
928 }
929 
930 #endif
931 
932 #if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
/*
 * RSA "verify" primitive: computes sign^e mod n via the PKA exptmod and
 * copies the decrypted block to @output for the caller to compare.
 *
 * @ctx:    RSA key (e, n, optional Montgomery helper c)
 * @sign:   signature, n_words words
 * @output: receives BITS2BYTE(key bits) result bytes on success
 *
 * Returns 0 on success, -EINVAL for an unknown key size, or the
 * allocation/PKA error code.  All big numbers are freed on every path
 * (rk_mpa_free() tolerates never-allocated NULL handles).
 */
static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx,
				      u8 *sign, u8 *output)
{
	struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL;
	struct mpa_num *mpa_c = NULL, *mpa_result = NULL;
	u32 n_bits, n_words;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_RSA512 &&
	    ctx->algo != CRYPTO_RSA1024 &&
	    ctx->algo != CRYPTO_RSA2048 &&
	    ctx->algo != CRYPTO_RSA3072 &&
	    ctx->algo != CRYPTO_RSA4096)
		return -EINVAL;

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	ret = rk_mpa_alloc(&mpa_m, sign, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_e, ctx->e, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_n, ctx->n, n_words);
	if (ret)
		goto exit;

	/* optional precomputed Montgomery factor */
	if (ctx->c) {
		ret = rk_mpa_alloc(&mpa_c, ctx->c, n_words);
		if (ret)
			goto exit;
	}

	ret = rk_mpa_alloc(&mpa_result, NULL, n_words);
	if (ret)
		goto exit;

	rk_crypto_enable_clk(dev);
	ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result);
	if (!ret)
		memcpy(output, mpa_result->d, BITS2BYTE(n_bits));
	rk_crypto_disable_clk(dev);

exit:
	rk_mpa_free(&mpa_m);
	rk_mpa_free(&mpa_e);
	rk_mpa_free(&mpa_n);
	rk_mpa_free(&mpa_c);
	rk_mpa_free(&mpa_result);

	return ret;
}
991 #endif
992 
993 #if CONFIG_IS_ENABLED(ROCKCHIP_EC)
994 static int rockchip_crypto_ec_verify(struct udevice *dev, ec_key *ctx,
995 				     u8 *hash, u32 hash_len, u8 *sign)
996 {
997 	struct mpa_num *bn_sign = NULL;
998 	struct rk_ecp_point point_P, point_sign;
999 	u32 n_bits, n_words;
1000 	int ret;
1001 
1002 	if (!ctx)
1003 		return -EINVAL;
1004 
1005 	if (ctx->algo != CRYPTO_SM2 &&
1006 	    ctx->algo != CRYPTO_ECC_192R1 &&
1007 	    ctx->algo != CRYPTO_ECC_224R1 &&
1008 	    ctx->algo != CRYPTO_ECC_256R1)
1009 		return -EINVAL;
1010 
1011 	n_bits = crypto_algo_nbits(ctx->algo);
1012 	n_words = BITS2WORD(n_bits);
1013 
1014 	ret = rk_mpa_alloc(&bn_sign, sign, n_words);
1015 	if (ret)
1016 		goto exit;
1017 
1018 	ret = rk_mpa_alloc(&point_P.x, ctx->x, n_words);
1019 	ret |= rk_mpa_alloc(&point_P.y, ctx->y, n_words);
1020 	if (ret)
1021 		goto exit;
1022 
1023 	ret = rk_mpa_alloc(&point_sign.x, sign, n_words);
1024 	ret |= rk_mpa_alloc(&point_sign.y, sign + WORD2BYTE(n_words), n_words);
1025 	if (ret)
1026 		goto exit;
1027 
1028 	rk_crypto_enable_clk(dev);
1029 	ret = rockchip_ecc_verify(ctx->algo, hash, hash_len, &point_P, &point_sign);
1030 	rk_crypto_disable_clk(dev);
1031 exit:
1032 	rk_mpa_free(&bn_sign);
1033 	rk_mpa_free(&point_P.x);
1034 	rk_mpa_free(&point_P.y);
1035 	rk_mpa_free(&point_sign.x);
1036 	rk_mpa_free(&point_sign.y);
1037 
1038 	return ret;
1039 }
1040 #endif
1041 
/* UCLASS_CRYPTO operations; optional families compiled in per Kconfig. */
static const struct dm_crypto_ops rockchip_crypto_ops = {
	.capability   = rockchip_crypto_capability,
	.sha_init     = rockchip_crypto_sha_init,
	.sha_update   = rockchip_crypto_sha_update,
	.sha_final    = rockchip_crypto_sha_final,
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	.hmac_init    = rockchip_crypto_hmac_init,
	.hmac_update  = rockchip_crypto_hmac_update,
	.hmac_final   = rockchip_crypto_hmac_final,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	.rsa_verify   = rockchip_crypto_rsa_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
	.ec_verify    = rockchip_crypto_ec_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	.cipher_crypt    = rockchip_crypto_cipher,
	.cipher_mac      = rockchip_crypto_mac,
	.cipher_ae       = rockchip_crypto_ae,
	.cipher_fw_crypt = rockchip_crypto_fw_cipher,
	.keytable_addr   = rockchip_get_keytable_addr,
#endif

};
1067 
1068 /*
1069  * Only use "clocks" to parse crypto clock id and use rockchip_get_clk().
1070  * Because we always add crypto node in U-Boot dts, when kernel dtb enabled :
1071  *
1072  *   1. There is cru phandle mismatch between U-Boot and kernel dtb;
1073  *   2. CONFIG_OF_SPL_REMOVE_PROPS removes clock property;
1074  */
1075 static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev)
1076 {
1077 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
1078 	int len, ret = -EINVAL;
1079 
1080 	memset(priv, 0x00, sizeof(*priv));
1081 
1082 	priv->reg = (fdt_addr_t)dev_read_addr_ptr(dev);
1083 	if (priv->reg == FDT_ADDR_T_NONE)
1084 		return -EINVAL;
1085 
1086 	crypto_base = priv->reg;
1087 
1088 	/* if there is no clocks in dts, just skip it */
1089 	if (!dev_read_prop(dev, "clocks", &len)) {
1090 		printf("Can't find \"clocks\" property\n");
1091 		return 0;
1092 	}
1093 
1094 	priv->clocks = malloc(len);
1095 	if (!priv->clocks)
1096 		return -ENOMEM;
1097 
1098 	priv->nclocks = len / (2 * sizeof(u32));
1099 	if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks,
1100 			       priv->nclocks)) {
1101 		printf("Can't read \"clocks\" property\n");
1102 		ret = -EINVAL;
1103 		goto exit;
1104 	}
1105 
1106 	if (dev_read_prop(dev, "clock-frequency", &len)) {
1107 		priv->frequencies = malloc(len);
1108 		if (!priv->frequencies) {
1109 			ret = -ENOMEM;
1110 			goto exit;
1111 		}
1112 		priv->freq_nclocks = len / sizeof(u32);
1113 		if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies,
1114 				       priv->freq_nclocks)) {
1115 			printf("Can't read \"clock-frequency\" property\n");
1116 			ret = -EINVAL;
1117 			goto exit;
1118 		}
1119 	}
1120 
1121 	return 0;
1122 exit:
1123 	if (priv->clocks)
1124 		free(priv->clocks);
1125 
1126 	if (priv->frequencies)
1127 		free(priv->frequencies);
1128 
1129 	return ret;
1130 }
1131 
1132 static int rk_crypto_do_enable_clk(struct udevice *dev, int enable)
1133 {
1134 	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
1135 	struct clk clk;
1136 	int i, ret;
1137 
1138 	for (i = 0; i < priv->nclocks; i++) {
1139 		ret = clk_get_by_index(dev, i, &clk);
1140 		if (ret < 0) {
1141 			printf("Failed to get clk index %d, ret=%d\n", i, ret);
1142 			return ret;
1143 		}
1144 
1145 		if (enable)
1146 			ret = clk_enable(&clk);
1147 		else
1148 			ret = clk_disable(&clk);
1149 		if (ret < 0 && ret != -ENOSYS) {
1150 			printf("Failed to enable(%d) clk(%ld): ret=%d\n",
1151 			       enable, clk.id, ret);
1152 			return ret;
1153 		}
1154 	}
1155 
1156 	return 0;
1157 }
1158 
/* Ungate all crypto clocks. */
static int rk_crypto_enable_clk(struct udevice *dev)
{
	return rk_crypto_do_enable_clk(dev, 1);
}
1163 
/* Gate all crypto clocks. */
static int rk_crypto_disable_clk(struct udevice *dev)
{
	return rk_crypto_do_enable_clk(dev, 0);
}
1168 
/*
 * Program the crypto clock rates.  Prefers the standard
 * "assigned-clock-rates" property; falls back to the legacy
 * "clock-frequency" list parsed in ofdata_to_platdata().  Returns 0
 * when neither is present.
 */
static int rk_crypto_set_clk(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct clk clk;
	int i, ret;

	/* use standard "assigned-clock-rates" props */
	if (dev_read_size(dev, "assigned-clock-rates") > 0)
		return clk_set_defaults(dev);

	/* use "clock-frequency" props */
	if (priv->freq_nclocks == 0)
		return 0;

	for (i = 0; i < priv->freq_nclocks; i++) {
		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0) {
			printf("Failed to get clk index %d, ret=%d\n", i, ret);
			return ret;
		}
		ret = clk_set_rate(&clk, priv->frequencies[i]);
		if (ret < 0) {
			printf("%s: Failed to set clk(%ld): ret=%d\n",
			       __func__, clk.id, ret);
			return ret;
		}
	}

	return 0;
}
1199 
/*
 * Probe: program clock rates, create the rkce hardware handle and cache
 * the capability mask.  Clocks are gated off again before returning on
 * both the success and failure paths.
 */
static int rockchip_crypto_probe(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int ret = 0;

	ret = rk_crypto_set_clk(dev);
	if (ret)
		return ret;

	rk_crypto_enable_clk(dev);

	priv->hardware = rkce_hardware_alloc((void *)priv->reg);

	if (!priv->hardware) {
		ret = -ENOMEM;
		goto exit;
	}

	/* cache the mask so later capability() calls skip hardware probing */
	priv->capability = rockchip_crypto_capability(dev);
exit:
	rk_crypto_disable_clk(dev);

	return ret;
}
1224 
/* Device-tree match table. */
static const struct udevice_id rockchip_crypto_ids[] = {
	{
		.compatible = "rockchip,crypto-ce",
	},
	{ }
};
1231 
/* Driver registration with the U-Boot driver model. */
U_BOOT_DRIVER(rockchip_crypto_ce) = {
	.name		= "rockchip_crypto_ce",
	.id		= UCLASS_CRYPTO,
	.of_match	= rockchip_crypto_ids,
	.ops		= &rockchip_crypto_ops,
	.probe		= rockchip_crypto_probe,
	.ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv),
};
1241