// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2025 Rockchip Electronics Co., Ltd
 */

#include <clk.h>
#include <clk-uclass.h>
#include <common.h>
#include <crypto.h>
#include <dm.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <asm/arch/clock.h>
#include <rockchip/crypto_ecc.h>
#include <rockchip/crypto_v2_pka.h>
#include <rockchip/rkce_core.h>

fdt_addr_t crypto_base;

#define ROUNDUP(size, alignment)	round_up(size, alignment)

#define RKCE_HASH_TIMEOUT_MS	1000
#define RKCE_SYMM_TIMEOUT_MS	1000

struct rkce_sha_context {
	u32				length;
	struct rkce_hash_td_ctrl	ctrl;
	struct rkce_hash_td		*td;
	struct rkce_hash_td_buf		*td_buf;
};

struct rkce_cipher_context {
	struct rkce_symm_td		*td;
	struct rkce_symm_td		*td_aad;
	struct rkce_symm_td_buf		*td_buf;
};

struct rockchip_crypto_priv {
	fdt_addr_t			reg;
	u32				frequency;
	char				*clocks;
	u32				*frequencies;
	u32				nclocks;
	u32				freq_nclocks;
	u32				capability;

	void				*hardware;
	struct rkce_sha_context		*hash_ctx;
	u16				secure;
	u16				enabled;
};

struct rockchip_map {
	u32				crypto;
	u32				rkce;
};

static const struct rockchip_map rk_hash_map[] = {
	{CRYPTO_SM3,         RKCE_HASH_ALGO_SM3},
	{CRYPTO_MD5,         RKCE_HASH_ALGO_MD5},
	{CRYPTO_SHA1,        RKCE_HASH_ALGO_SHA1},
	{CRYPTO_SHA256,      RKCE_HASH_ALGO_SHA256},
	{CRYPTO_SHA512,      RKCE_HASH_ALGO_SHA512},
};

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
static const struct rockchip_map rk_hmac_map[] = {
	{CRYPTO_HMAC_MD5,    RKCE_HASH_ALGO_MD5},
	{CRYPTO_HMAC_SHA1,   RKCE_HASH_ALGO_SHA1},
	{CRYPTO_HMAC_SHA256, RKCE_HASH_ALGO_SHA256},
	{CRYPTO_HMAC_SHA512, RKCE_HASH_ALGO_SHA512},
	{CRYPTO_HMAC_SM3,    RKCE_HASH_ALGO_SM3},
};
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
static const struct rockchip_map rk_cipher_map[] = {
	{CRYPTO_AES,         RKCE_SYMM_ALGO_AES},
	{CRYPTO_DES,         RKCE_SYMM_ALGO_TDES},
	{CRYPTO_SM4,         RKCE_SYMM_ALGO_SM4},
};
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
static const struct rockchip_map rk_rsa_map[] = {
	{CRYPTO_RSA512,       RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA1024,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA2048,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA3072,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA4096,      RKCE_ASYM_ALGO_RSA},
};
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
static const struct rockchip_map rk_ec_map[] = {
	{CRYPTO_SM2,          RKCE_ASYM_ALGO_SM2},
	{CRYPTO_ECC_192R1,    RKCE_ASYM_ALGO_ECC_P192},
	{CRYPTO_ECC_224R1,    RKCE_ASYM_ALGO_ECC_P224},
	{CRYPTO_ECC_256R1,    RKCE_ASYM_ALGO_ECC_P256},
};
#endif

static int rk_crypto_enable_clk(struct udevice *dev);
static int rk_crypto_disable_clk(struct udevice *dev);

static void rk_crypto_soft_reset(struct udevice *dev, uint32_t reset_sel)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

	if (!priv->hardware)
		return;

	rk_crypto_enable_clk(dev);

	rkce_soft_reset(priv->hardware, reset_sel);

	rk_crypto_disable_clk(dev);
}

static void crypto_flush_cacheline(ulong addr, ulong size)
{
	ulong alignment = CONFIG_SYS_CACHELINE_SIZE;
	ulong aligned_input, aligned_len;

	if (!addr || !size)
		return;

	/* Must flush dcache before the crypto DMA fetches the data region */
	aligned_input = round_down(addr, alignment);
	aligned_len = round_up(size + (addr - aligned_input), alignment);
	flush_cache(aligned_input, aligned_len);
}

/* Take ulong so 64-bit addresses are not truncated on ARM64 */
static void crypto_invalidate_cacheline(ulong addr, ulong size)
{
	ulong alignment = CONFIG_SYS_CACHELINE_SIZE;
	ulong aligned_input, aligned_len;

	if (!addr || !size)
		return;

	/* Must invalidate dcache after the crypto DMA writes the data region */
	aligned_input = round_down(addr, alignment);
	aligned_len = round_up(size + (addr - aligned_input), alignment);
	invalidate_dcache_range(aligned_input, aligned_input + aligned_len);
}

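/*
 * Translate a CRYPTO_* algorithm identifier into the matching RKCE
 * hardware enum by scanning every algorithm table compiled into the
 * driver. Returns 0 when the algorithm is not supported.
 */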
static u32 rk_get_cemode(u32 algo)
{
	u32 i, j;
	struct {
		const struct rockchip_map	*map;
		u32				num;
	} map_tbl[] = {
		{rk_hash_map, ARRAY_SIZE(rk_hash_map)},
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
		{rk_hmac_map, ARRAY_SIZE(rk_hmac_map)},
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
		{rk_cipher_map, ARRAY_SIZE(rk_cipher_map)},
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
		{rk_rsa_map, ARRAY_SIZE(rk_rsa_map)},
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
		{rk_ec_map, ARRAY_SIZE(rk_ec_map)},
#endif
	};

	for (i = 0; i < ARRAY_SIZE(map_tbl); i++) {
		const struct rockchip_map *map = map_tbl[i].map;
		u32 num = map_tbl[i].num;

		for (j = 0; j < num; j++) {
			if (map[j].crypto == algo)
				return map[j].rkce;
		}
	}

	return 0;
}

static u32 rk_load_map(struct rockchip_crypto_priv *priv, u32 algo_type,
		       const struct rockchip_map *map, u32 num)
{
	u32 i;
	u32 capability = 0;

	for (i = 0; i < num; i++) {
		if (rkce_hw_algo_valid(priv->hardware, algo_type, map[i].rkce, 0))
			capability |= map[i].crypto;
	}

	return capability;
}

static u32 rockchip_crypto_capability(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 cap = 0;

	if (!priv->enabled)
		return 0;

	if (priv->capability)
		return priv->capability;

	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_HASH,
			   rk_hash_map, ARRAY_SIZE(rk_hash_map));

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_HMAC,
			   rk_hmac_map, ARRAY_SIZE(rk_hmac_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_CIPHER,
			   rk_cipher_map, ARRAY_SIZE(rk_cipher_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_ASYM, rk_rsa_map,
			   ARRAY_SIZE(rk_rsa_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_ASYM, rk_ec_map,
			   ARRAY_SIZE(rk_ec_map));
#endif

	return cap;
}

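/*
 * The task descriptor (td) and its buffer are fetched by the crypto
 * engine via DMA, so both are allocated from the CMA pool rather than
 * the normal heap; only the context wrapper itself lives on the heap.
 */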
static void *rkce_sha_ctx_alloc(void)
{
	struct rkce_sha_context *hw_ctx;

	hw_ctx = malloc(sizeof(*hw_ctx));
	if (!hw_ctx)
		return NULL;

	memset(hw_ctx, 0x00, sizeof(*hw_ctx));

	hw_ctx->td = rkce_cma_alloc(sizeof(struct rkce_hash_td));
	if (!hw_ctx->td)
		goto error;

	memset(hw_ctx->td, 0x00, sizeof(struct rkce_hash_td));

	hw_ctx->td_buf = rkce_cma_alloc(sizeof(struct rkce_hash_td_buf));
	if (!hw_ctx->td_buf)
		goto error;

	memset(hw_ctx->td_buf, 0x00, sizeof(struct rkce_hash_td_buf));

	return hw_ctx;
error:
	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);

	return NULL;
}

static void rkce_sha_ctx_free(struct rkce_sha_context *hw_ctx)
{
	if (!hw_ctx)
		return;

	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);
}

static int rk_sha_init(struct udevice *dev, sha_context *ctx,
		       u8 *key, u32 key_len, bool is_hmac)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_sha_context *hash_ctx = NULL;
	u32 ce_algo = 0;
	int ret = 0;

	if ((ctx->algo & priv->capability) == 0)
		return -ENOSYS;

	/* only one hash session can be open at a time */
	if (priv->hash_ctx)
		return -EFAULT;

	rk_crypto_soft_reset(dev, RKCE_RESET_HASH);

	hash_ctx = rkce_sha_ctx_alloc();
	if (!hash_ctx)
		return -ENOMEM;

	ret = rkce_init_hash_td(hash_ctx->td, hash_ctx->td_buf);
	if (ret)
		goto exit;

	ce_algo = rk_get_cemode(ctx->algo);

	hash_ctx->ctrl.td_type        = RKCE_TD_TYPE_HASH;
	hash_ctx->ctrl.hw_pad_en      = 1;
	hash_ctx->ctrl.first_pkg      = 1;
	hash_ctx->ctrl.last_pkg       = 0;
	hash_ctx->ctrl.hash_algo      = ce_algo;
	hash_ctx->ctrl.hmac_en        = is_hmac;
	hash_ctx->ctrl.is_preemptible = 0;
	hash_ctx->ctrl.int_en         = 1;

	if (is_hmac) {
		if (!key || key_len > 64) {
			ret = -EINVAL;
			goto exit;
		}

		memcpy(hash_ctx->td_buf->key, key, key_len);
	}

	priv->hash_ctx = hash_ctx;
exit:
	if (ret) {
		rkce_sha_ctx_free(hash_ctx);
		priv->hash_ctx = NULL;
	}

	return ret;
}

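/*
 * Push one hash task descriptor per update call. Data reaches the
 * engine through a single scatter-gather entry, so the caller's buffer
 * must stay valid for the duration of the operation; dcache is flushed
 * before the DMA read and the result buffer is invalidated after the
 * DMA write.
 */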
static int rk_sha_update(struct udevice *dev, u32 *input, u32 len, bool is_last)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_sha_context *hash_ctx;
	struct rkce_hash_td *td;
	int ret = 0;

	if (!priv->hash_ctx)
		return -EINVAL;

	if (!is_last && (!input || len == 0))
		return -EINVAL;

	hash_ctx = priv->hash_ctx;
	td = hash_ctx->td;

	td->ctrl = hash_ctx->ctrl;
	memset(td->sg, 0x00, sizeof(td->sg));

	if (hash_ctx->ctrl.first_pkg == 1)
		hash_ctx->ctrl.first_pkg = 0;

	if (is_last) {
		td->ctrl.last_pkg = 1;
	} else {
#ifdef CONFIG_ARM64
		td->sg[0].src_addr_h = rkce_cma_virt2phys(input) >> 32;
#endif
		td->sg[0].src_addr_l = rkce_cma_virt2phys(input) & 0xffffffff;
		td->sg[0].src_size   = len;
		hash_ctx->length += len;
		crypto_flush_cacheline((ulong)input, len);
	}

	rk_crypto_enable_clk(dev);

	crypto_flush_cacheline((ulong)hash_ctx->td, sizeof(*hash_ctx->td));
	crypto_flush_cacheline((ulong)hash_ctx->td_buf, sizeof(*hash_ctx->td_buf));

	ret = rkce_push_td_sync(priv->hardware, td, RKCE_HASH_TIMEOUT_MS);

	crypto_invalidate_cacheline((ulong)hash_ctx->td_buf, sizeof(*hash_ctx->td_buf));

	rk_crypto_disable_clk(dev);

	/* a failed push leaves the session unusable; drop the context */
	if (ret) {
		rkce_sha_ctx_free(hash_ctx);
		priv->hash_ctx = NULL;
	}

	return ret;
}

static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
{
	return rk_sha_init(dev, ctx, NULL, 0, false);
}

static int rockchip_crypto_sha_update(struct udevice *dev, u32 *input, u32 len)
{
	return rk_sha_update(dev, input, len, false);
}

static int rockchip_crypto_sha_final(struct udevice *dev, sha_context *ctx, u8 *output)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_sha_context *hash_ctx = priv->hash_ctx;
	u32 nbits;
	int ret;

	if (!priv->hash_ctx)
		return -EINVAL;

	nbits = crypto_algo_nbits(ctx->algo);

	if (hash_ctx->length != ctx->length) {
		printf("total length(0x%08x) != init length(0x%08x)!\n",
		       hash_ctx->length, ctx->length);
		ret = -EIO;
		goto exit;
	}

	ret = rk_sha_update(dev, NULL, 0, true);
	if (ret == 0)
		memcpy(output, hash_ctx->td_buf->hash, BITS2BYTE(nbits));

exit:
	/* rk_sha_update() already dropped the context on failure */
	rkce_sha_ctx_free(priv->hash_ctx);
	priv->hash_ctx = NULL;

	return ret;
}

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
static int rockchip_crypto_hmac_init(struct udevice *dev, sha_context *ctx, u8 *key, u32 key_len)
{
	return rk_sha_init(dev, ctx, key, key_len, true);
}

static int rockchip_crypto_hmac_update(struct udevice *dev, u32 *input, u32 len)
{
	return rockchip_crypto_sha_update(dev, input, len);
}

static int rockchip_crypto_hmac_final(struct udevice *dev, sha_context *ctx, u8 *output)
{
	return rockchip_crypto_sha_final(dev, ctx, output);
}
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)

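/*
 * The helpers below format the CCM B0/counter blocks in software (see
 * NIST SP 800-38C / RFC 3610): byte 0 carries the flags and the L
 * parameter, bytes 1..(14 - L) the nonce, and the trailing bytes the
 * message length, most significant byte first.
 */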
static int hw_crypto_ccm128_setiv(u8 *iv_buf, u8 *nonce, u32 nlen, u32 mlen)
{
	u32 L = iv_buf[0] & 7;	/* the L parameter */

	if (nlen < (14 - L))
		return -1;	/* nonce is too short */

	/*
	 * mlen is a u32 here, so this branch is compiled out; it is kept
	 * from the generic CCM128 code, where mlen is 64 bits wide. The
	 * "% (sizeof(mlen) * 8)" keeps the shift count in range so the
	 * dead code still compiles cleanly.
	 */
	if (sizeof(mlen) == 8 && L >= 3) {
		iv_buf[8]  = mlen >> (56 % (sizeof(mlen) * 8));
		iv_buf[9]  = mlen >> (48 % (sizeof(mlen) * 8));
		iv_buf[10] = mlen >> (40 % (sizeof(mlen) * 8));
		iv_buf[11] = mlen >> (32 % (sizeof(mlen) * 8));
	}

	iv_buf[12] = mlen >> 24;
	iv_buf[13] = mlen >> 16;
	iv_buf[14] = mlen >> 8;
	iv_buf[15] = mlen;

	iv_buf[0] &= ~0x40;	/* clear aad flag */
	memcpy(&iv_buf[1], nonce, 14 - L);

	return 0;
}

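/*
 * Encode the CCM AAD length prefix: two bytes for short AAD, or the
 * 0xFF 0xFE / 0xFF 0xFF escape followed by a 32- or 64-bit length for
 * larger values, as specified by SP 800-38C.
 */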
static void hw_get_ccm_aad_padding(u32 aad_len, u8 *padding, u32 *padding_size)
{
	u32 i = 0;

	if (aad_len == 0) {
		*padding_size = 0;
		return;
	}

	if (aad_len < (0x10000 - 0x100)) {
		i = 2;
	} else if (sizeof(aad_len) == 8 &&
		   aad_len >= (size_t)1 << (32 % (sizeof(aad_len) * 8))) {
		i = 10;
	} else {
		i = 6;
	}

	if (i == 2) {
		padding[0] = aad_len >> 8;
		padding[1] = aad_len;
	} else if (i == 10) {
		padding[0] = 0xFF;
		padding[1] = 0xFF;
		padding[2] = aad_len >> (56 % (sizeof(aad_len) * 8));
		padding[3] = aad_len >> (48 % (sizeof(aad_len) * 8));
		padding[4] = aad_len >> (40 % (sizeof(aad_len) * 8));
		padding[5] = aad_len >> (32 % (sizeof(aad_len) * 8));
		padding[6] = aad_len >> 24;
		padding[7] = aad_len >> 16;
		padding[8] = aad_len >> 8;
		padding[9] = aad_len;
	} else {
		padding[0] = 0xFF;
		padding[1] = 0xFE;
		padding[2] = aad_len >> 24;
		padding[3] = aad_len >> 16;
		padding[4] = aad_len >> 8;
		padding[5] = aad_len;
	}

	*padding_size = i;
}

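/*
 * Turn a plain CCM counter block into the B0 block: fold the tag size
 * into the flags byte, write the payload length behind the nonce and
 * set the Adata flag when AAD is present.
 */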
static int hw_compose_ccm_aad_iv(u8 *aad_iv, u32 data_len,
				 u32 aad_len, u32 tag_size)
{
	u32 L;		/* the L parameter */
	u8 nonce[AES_BLOCK_SIZE];

	L = aad_iv[0] & 7;
	aad_iv[0] |= ((u8)(((tag_size - 2) / 2) & 7) << 3);

	if (sizeof(data_len) == 8 && L >= 3) {
		aad_iv[8]  = data_len >> (56 % (sizeof(data_len) * 8));
		aad_iv[9]  = data_len >> (48 % (sizeof(data_len) * 8));
		aad_iv[10] = data_len >> (40 % (sizeof(data_len) * 8));
		aad_iv[11] = data_len >> (32 % (sizeof(data_len) * 8));
	}

	/* save nonce */
	memcpy(nonce, &aad_iv[1], 14 - L);

	aad_iv[12] = data_len >> 24;
	aad_iv[13] = data_len >> 16;
	aad_iv[14] = data_len >> 8;
	aad_iv[15] = data_len;

	/* restore nonce */
	memcpy(&aad_iv[1], nonce, 14 - L);

	aad_iv[0] &= ~0x40;	/* clear Adata flag */

	if (aad_len)
		aad_iv[0] |= 0x40;	/* set Adata flag */

	return 0;
}

static void rkce_destroy_ccm_aad(u8 *new_aad)
{
	rkce_cma_free(new_aad);
}

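/*
 * Build the CCM AAD stream handed to the engine: the B0 block, then
 * the encoded AAD length, then the AAD itself, zero-padded to a whole
 * AES block. The stream lives in a CMA buffer so the engine can DMA
 * it; the normalized 16-byte counter IV is returned as well.
 */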
static int rkce_build_ccm_aad(const u8 *aad, u32 aad_len, u32 data_len,
			      u8 *iv, u32 iv_len,
			      u8 **new_aad, u32 *new_aad_len,
			      u8 *new_iv, u32 *new_iv_len)
{
	int ret = -RKCE_INVAL;
	u32 L;
	u8 nonce[AES_BLOCK_SIZE];
	u8 pad[AES_BLOCK_SIZE];
	u32 pad_size = 0;
	u32 tag_len = AES_BLOCK_SIZE;
	u8 *aad_tmp = NULL;
	u32 aad_tmp_len = 0;

	memset(nonce, 0x00, sizeof(nonce));

	L = 15 - iv_len;
	nonce[0] = (L - 1) & 7;
	ret = hw_crypto_ccm128_setiv(nonce, (u8 *)iv, iv_len, 0);
	if (ret)
		return ret;

	memcpy(new_iv, nonce, sizeof(nonce));
	*new_iv_len = sizeof(nonce);

	memset(pad, 0x00, sizeof(pad));
	hw_get_ccm_aad_padding(aad_len, pad, &pad_size);

	aad_tmp_len = aad_len + AES_BLOCK_SIZE + pad_size;
	aad_tmp_len = ROUNDUP(aad_tmp_len, AES_BLOCK_SIZE);

	aad_tmp = rkce_cma_alloc(aad_tmp_len);
	if (!aad_tmp) {
		ret = -RKCE_NOMEM;
		goto exit;
	}

	/* clear last block */
	memset(aad_tmp + aad_tmp_len - AES_BLOCK_SIZE, 0x00, AES_BLOCK_SIZE);
	memcpy(aad_tmp, nonce, sizeof(nonce));
	hw_compose_ccm_aad_iv(aad_tmp, data_len, aad_len, tag_len);
	memcpy(aad_tmp + AES_BLOCK_SIZE, pad, pad_size);
	memcpy(aad_tmp + AES_BLOCK_SIZE + pad_size, aad, aad_len);

	*new_aad     = aad_tmp;
	*new_aad_len = aad_tmp_len;

exit:
	return ret;
}

static void *rkce_cipher_ctx_alloc(void)
{
	struct rkce_cipher_context *hw_ctx;

	hw_ctx = malloc(sizeof(*hw_ctx));
	if (!hw_ctx)
		return NULL;

	/* zero the pointers so a partial allocation can be freed safely */
	memset(hw_ctx, 0x00, sizeof(*hw_ctx));

	hw_ctx->td = rkce_cma_alloc(sizeof(struct rkce_symm_td));
	if (!hw_ctx->td)
		goto error;

	memset(hw_ctx->td, 0x00, sizeof(struct rkce_symm_td));

	hw_ctx->td_aad = rkce_cma_alloc(sizeof(struct rkce_symm_td));
	if (!hw_ctx->td_aad)
		goto error;

	memset(hw_ctx->td_aad, 0x00, sizeof(struct rkce_symm_td));

	hw_ctx->td_buf = rkce_cma_alloc(sizeof(struct rkce_symm_td_buf));
	if (!hw_ctx->td_buf)
		goto error;

	memset(hw_ctx->td_buf, 0x00, sizeof(struct rkce_symm_td_buf));

	return hw_ctx;
error:
	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_aad);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);

	return NULL;
}

static void rkce_cipher_ctx_free(struct rkce_cipher_context *hw_ctx)
{
	if (!hw_ctx)
		return;

	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_aad);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);
}

static const struct rockchip_map rk_cipher_algo_map[] = {
	{RK_MODE_ECB,     RKCE_SYMM_MODE_ECB},
	{RK_MODE_CBC,     RKCE_SYMM_MODE_CBC},
	{RK_MODE_CTS,     RKCE_SYMM_MODE_CTS},
	{RK_MODE_CTR,     RKCE_SYMM_MODE_CTR},
	{RK_MODE_CFB,     RKCE_SYMM_MODE_CFB},
	{RK_MODE_OFB,     RKCE_SYMM_MODE_OFB},
	{RK_MODE_XTS,     RKCE_SYMM_MODE_XTS},
	{RK_MODE_CCM,     RKCE_SYMM_MODE_CCM},
	{RK_MODE_GCM,     RKCE_SYMM_MODE_GCM},
	{RK_MODE_CMAC,    RKCE_SYMM_MODE_CMAC},
	{RK_MODE_CBC_MAC, RKCE_SYMM_MODE_CBC_MAC},
};

static int rk_get_cipher_cemode(u32 algo, u32 mode, u32 *ce_algo, u32 *ce_mode)
{
	u32 i;

	switch (algo) {
	case CRYPTO_DES:
		*ce_algo = RKCE_SYMM_ALGO_TDES;
		break;
	case CRYPTO_AES:
		*ce_algo = RKCE_SYMM_ALGO_AES;
		break;
	case CRYPTO_SM4:
		*ce_algo = RKCE_SYMM_ALGO_SM4;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algo_map); i++) {
		if (mode == rk_cipher_algo_map[i].crypto) {
			*ce_mode = rk_cipher_algo_map[i].rkce;
			return 0;
		}
	}

	return -EINVAL;
}

u32 rk_get_td_keysize(u32 ce_algo, u32 ce_mode, u32 key_len)
{
	u32 key_size = 0;

	if (ce_algo == RKCE_SYMM_ALGO_AES) {
		if (key_len == AES_KEYSIZE_128)
			key_size = RKCE_KEY_AES_128;
		else if (key_len == AES_KEYSIZE_192)
			key_size = RKCE_KEY_AES_192;
		else if (key_len == AES_KEYSIZE_256)
			key_size = RKCE_KEY_AES_256;
	}

	/* other algorithms keep the default key_size of 0 */
	return key_size;
}

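/*
 * Load the user key(s) into the task descriptor buffer. XTS carries
 * the tweak key in key2; a two-key TDES key is expanded to the
 * three-key layout by repeating K1 as K3.
 */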
int rk_set_symm_td_buf_key(struct rkce_symm_td_buf *td_buf,
			   u32 ce_algo, u32 ce_mode, cipher_context *ctx)
{
	memset(td_buf->key1, 0x00, sizeof(td_buf->key1));
	memset(td_buf->key2, 0x00, sizeof(td_buf->key2));

	if (ce_mode == RKCE_SYMM_MODE_XTS) {
		memcpy(td_buf->key1, ctx->key, ctx->key_len);
		memcpy(td_buf->key2, ctx->twk_key, ctx->key_len);
	} else {
		memcpy(td_buf->key1, ctx->key, ctx->key_len);
	}

	if (ctx->key_len == DES_KEYSIZE * 2 &&
	    (ce_algo == RKCE_SYMM_ALGO_DES || ce_algo == RKCE_SYMM_ALGO_TDES))
		memcpy(td_buf->key1 + DES_KEYSIZE * 2, td_buf->key1, DES_KEYSIZE);

	return 0;
}

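/*
 * Describe the source and destination buffers with a single
 * scatter-gather entry; physical addresses are split into low/high
 * words for the 64-bit capable engine.
 */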
int rk_set_symm_td_sg(struct rkce_symm_td *td,
		      const u8 *in, u32 in_len, u8 *out, u32 out_len)
{
	memset(td->sg, 0x00, sizeof(td->sg));

#ifdef CONFIG_ARM64
	td->sg[0].src_addr_h = rkce_cma_virt2phys(in) >> 32;
#endif
	td->sg[0].src_addr_l = rkce_cma_virt2phys(in) & 0xffffffff;
	td->sg[0].src_size   = in_len;

	if (out && out_len) {
#ifdef CONFIG_ARM64
		td->sg[0].dst_addr_h = rkce_cma_virt2phys(out) >> 32;
#endif
		td->sg[0].dst_addr_l = rkce_cma_virt2phys(out) & 0xffffffff;
		td->sg[0].dst_size   = out_len;
	}

	td->next_task = 0;

	return 0;
}

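/*
 * Common symmetric-crypto path for plain ciphers, MACs and AEAD. A
 * NULL key with a non-zero key_len selects the OTP key table. CCM and
 * GCM first push a separate descriptor that runs only the AAD through
 * the engine, then push the payload descriptor; the tag, when
 * requested, is read back from the descriptor buffer.
 */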
static int rk_crypto_cipher(struct udevice *dev, cipher_context *ctx,
			    const u8 *in, u8 *out, u32 len, bool enc,
			    const u8 *aad, u32 aad_len, u8 *tag)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_cipher_context *hw_ctx = NULL;
	u32 ce_algo = 0, ce_mode = 0;
	bool use_otpkey = false;
	int ret = 0;

	rk_crypto_soft_reset(dev, RKCE_RESET_SYMM);

	if (!ctx->key && ctx->key_len)
		use_otpkey = true;

	ret = rk_get_cipher_cemode(ctx->algo, ctx->mode, &ce_algo, &ce_mode);
	if (ret)
		return ret;

	hw_ctx = rkce_cipher_ctx_alloc();
	if (!hw_ctx)
		return -ENOMEM;

	rkce_init_symm_td(hw_ctx->td, hw_ctx->td_buf);

	hw_ctx->td->ctrl.td_type   = RKCE_TD_TYPE_SYMM;
	hw_ctx->td->ctrl.is_dec    = !enc;
	hw_ctx->td->ctrl.symm_algo = ce_algo;
	hw_ctx->td->ctrl.symm_mode = ce_mode;
	hw_ctx->td->ctrl.key_size  = rk_get_td_keysize(ce_algo, ce_mode, ctx->key_len);
	hw_ctx->td->ctrl.first_pkg = 1;
	hw_ctx->td->ctrl.last_pkg  = 1;
	hw_ctx->td->ctrl.int_en    = 1;
	hw_ctx->td->ctrl.key_sel   = use_otpkey ? RKCE_KEY_SEL_KT : RKCE_KEY_SEL_USER;

	memcpy(hw_ctx->td_buf->iv, ctx->iv, ctx->iv_len);
	hw_ctx->td->ctrl.iv_len    = ctx->iv_len;

	if (!use_otpkey) {
		ret = rk_set_symm_td_buf_key(hw_ctx->td_buf, ce_algo, ce_mode, ctx);
		if (ret)
			goto exit;
	}

	ret = rk_set_symm_td_sg(hw_ctx->td, in, len, out, len);
	if (ret)
		goto exit;

	if (ce_mode == RKCE_SYMM_MODE_CCM) {
		u8 *new_aad = NULL;
		u32 new_aad_len = 0, new_iv_len = 0;

		rkce_init_symm_td(hw_ctx->td_aad, hw_ctx->td_buf);

		ret = rkce_build_ccm_aad(aad, aad_len, len,
					 hw_ctx->td_buf->iv, ctx->iv_len,
					 &new_aad, &new_aad_len,
					 hw_ctx->td_buf->iv, &new_iv_len);
		if (ret)
			goto exit;

		ret = rk_set_symm_td_sg(hw_ctx->td_aad, new_aad, new_aad_len, NULL, 0);
		if (ret)
			goto exit;

		hw_ctx->td->ctrl.iv_len = new_iv_len;

		hw_ctx->td_buf->gcm_len.aad_len_l = new_aad_len;

		hw_ctx->td_aad->ctrl = hw_ctx->td->ctrl;
		hw_ctx->td_aad->ctrl.is_aad = 1;

		crypto_flush_cacheline((ulong)hw_ctx->td_aad, sizeof(*hw_ctx->td_aad));
		crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
		crypto_flush_cacheline((ulong)new_aad, new_aad_len);

		rk_crypto_enable_clk(dev);

		ret = rkce_push_td_sync(priv->hardware, hw_ctx->td_aad, RKCE_SYMM_TIMEOUT_MS);

		crypto_invalidate_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));

		rk_crypto_disable_clk(dev);

		rkce_destroy_ccm_aad(new_aad);

		if (ret) {
			printf("CCM AAD calculation failed.\n");
			goto exit;
		}
	} else if (ce_mode == RKCE_SYMM_MODE_GCM) {
		rkce_init_symm_td(hw_ctx->td_aad, hw_ctx->td_buf);

		ret = rk_set_symm_td_sg(hw_ctx->td_aad, aad, aad_len, NULL, 0);
		if (ret)
			goto exit;

		hw_ctx->td_buf->gcm_len.aad_len_l = aad_len;
		hw_ctx->td_buf->gcm_len.pc_len_l = len;

		hw_ctx->td_aad->ctrl = hw_ctx->td->ctrl;
		hw_ctx->td_aad->ctrl.is_aad = 1;

		crypto_flush_cacheline((ulong)hw_ctx->td_aad, sizeof(*hw_ctx->td_aad));
		crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
		crypto_flush_cacheline((ulong)aad, aad_len);

		rk_crypto_enable_clk(dev);

		ret = rkce_push_td_sync(priv->hardware, hw_ctx->td_aad, RKCE_SYMM_TIMEOUT_MS);

		crypto_invalidate_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));

		rk_crypto_disable_clk(dev);
		if (ret) {
			printf("GCM AAD calculation failed.\n");
			goto exit;
		}
	}

	crypto_flush_cacheline((ulong)hw_ctx->td, sizeof(*hw_ctx->td));
	crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
	crypto_flush_cacheline((ulong)in, len);
	if (in != out)
		crypto_flush_cacheline((ulong)out, len);

	rk_crypto_enable_clk(dev);

	ret = rkce_push_td_sync(priv->hardware, hw_ctx->td, RKCE_SYMM_TIMEOUT_MS);

	crypto_invalidate_cacheline((ulong)out, len);
	crypto_invalidate_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));

	rk_crypto_disable_clk(dev);

	/* only read the tag back when the operation succeeded */
	if (tag && !ret)
		memcpy(tag, hw_ctx->td_buf->tag, sizeof(hw_ctx->td_buf->tag));
exit:
	rkce_cipher_ctx_free(hw_ctx);

	return ret;
}

static int rockchip_crypto_cipher(struct udevice *dev, cipher_context *ctx,
				  const u8 *in, u8 *out, u32 len, bool enc)
{
	return rk_crypto_cipher(dev, ctx, in, out, len, enc, NULL, 0, NULL);
}

static int rockchip_crypto_mac(struct udevice *dev, cipher_context *ctx,
			       const u8 *in, u32 len, u8 *tag)
{
	return rk_crypto_cipher(dev, ctx, in, NULL, len, true, NULL, 0, tag);
}

static int rockchip_crypto_ae(struct udevice *dev, cipher_context *ctx,
			      const u8 *in, u32 len, const u8 *aad, u32 aad_len,
			      u8 *out, u8 *tag)
{
	return rk_crypto_cipher(dev, ctx, in, out, len, true, aad, aad_len, tag);
}

#if CONFIG_IS_ENABLED(DM_KEYLAD)
int rockchip_crypto_fw_cipher(struct udevice *dev, cipher_fw_context *ctx,
			      const u8 *in, u8 *out, u32 len, bool enc)
{
	cipher_context cipher_ctx;

	memset(&cipher_ctx, 0x00, sizeof(cipher_ctx));

	cipher_ctx.algo    = ctx->algo;
	cipher_ctx.mode    = ctx->mode;
	cipher_ctx.key_len = ctx->key_len;
	cipher_ctx.iv      = ctx->iv;
	cipher_ctx.iv_len  = ctx->iv_len;

	return rk_crypto_cipher(dev, &cipher_ctx, in, out, len, enc, NULL, 0, NULL);
}

static ulong rockchip_get_keytable_addr(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	ulong addr;

	rk_crypto_enable_clk(dev);

	addr = rkce_get_keytable_addr(priv->hardware);

	rk_crypto_disable_clk(dev);

	return addr;
}
#endif

#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
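/*
 * RSA verification is a raw modular exponentiation sign^e mod n on the
 * PKA; the caller gets the encoded message block back and checks the
 * padding itself. ctx->c, when supplied, is forwarded to
 * rk_exptmod_np(), presumably as a precomputed Montgomery/Barrett
 * constant (assumption based on the PKA API naming).
 */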
static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx,
				      u8 *sign, u8 *output)
{
	struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL;
	struct mpa_num *mpa_c = NULL, *mpa_result = NULL;
	u32 n_bits, n_words;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_RSA512 &&
	    ctx->algo != CRYPTO_RSA1024 &&
	    ctx->algo != CRYPTO_RSA2048 &&
	    ctx->algo != CRYPTO_RSA3072 &&
	    ctx->algo != CRYPTO_RSA4096)
		return -EINVAL;

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	ret = rk_mpa_alloc(&mpa_m, sign, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_e, ctx->e, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_n, ctx->n, n_words);
	if (ret)
		goto exit;

	if (ctx->c) {
		ret = rk_mpa_alloc(&mpa_c, ctx->c, n_words);
		if (ret)
			goto exit;
	}

	ret = rk_mpa_alloc(&mpa_result, NULL, n_words);
	if (ret)
		goto exit;

	rk_crypto_enable_clk(dev);
	ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result);
	if (!ret)
		memcpy(output, mpa_result->d, BITS2BYTE(n_bits));
	rk_crypto_disable_clk(dev);

exit:
	rk_mpa_free(&mpa_m);
	rk_mpa_free(&mpa_e);
	rk_mpa_free(&mpa_n);
	rk_mpa_free(&mpa_c);
	rk_mpa_free(&mpa_result);

	return ret;
}
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
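/*
 * ECDSA/SM2 verification over the curves advertised in rk_ec_map: the
 * public key (x, y) and the signature, laid out as r || s, are
 * converted to bignum points and handed to rockchip_ecc_verify().
 */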
static int rockchip_crypto_ec_verify(struct udevice *dev, ec_key *ctx,
				     u8 *hash, u32 hash_len, u8 *sign)
{
	struct rk_ecp_point point_P, point_sign;
	u32 n_bits, n_words;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_SM2 &&
	    ctx->algo != CRYPTO_ECC_192R1 &&
	    ctx->algo != CRYPTO_ECC_224R1 &&
	    ctx->algo != CRYPTO_ECC_256R1)
		return -EINVAL;

	/* make the exit path safe if an early allocation fails */
	memset(&point_P, 0x00, sizeof(point_P));
	memset(&point_sign, 0x00, sizeof(point_sign));

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	ret = rk_mpa_alloc(&point_P.x, ctx->x, n_words);
	ret |= rk_mpa_alloc(&point_P.y, ctx->y, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&point_sign.x, sign, n_words);
	ret |= rk_mpa_alloc(&point_sign.y, sign + WORD2BYTE(n_words), n_words);
	if (ret)
		goto exit;

	rk_crypto_enable_clk(dev);
	ret = rockchip_ecc_verify(ctx->algo, hash, hash_len, &point_P, &point_sign);
	rk_crypto_disable_clk(dev);
exit:
	rk_mpa_free(&point_P.x);
	rk_mpa_free(&point_P.y);
	rk_mpa_free(&point_sign.x);
	rk_mpa_free(&point_sign.y);

	return ret;
}
#endif

static bool rockchip_crypto_is_secure(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

	return priv->secure;
}

static const struct dm_crypto_ops rockchip_crypto_ops = {
	.capability   = rockchip_crypto_capability,
	.sha_init     = rockchip_crypto_sha_init,
	.sha_update   = rockchip_crypto_sha_update,
	.sha_final    = rockchip_crypto_sha_final,
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	.hmac_init    = rockchip_crypto_hmac_init,
	.hmac_update  = rockchip_crypto_hmac_update,
	.hmac_final   = rockchip_crypto_hmac_final,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	.rsa_verify   = rockchip_crypto_rsa_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
	.ec_verify    = rockchip_crypto_ec_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	.cipher_crypt    = rockchip_crypto_cipher,
	.cipher_mac      = rockchip_crypto_mac,
	.cipher_ae       = rockchip_crypto_ae,

#if CONFIG_IS_ENABLED(DM_KEYLAD)
	.cipher_fw_crypt = rockchip_crypto_fw_cipher,
	.keytable_addr   = rockchip_get_keytable_addr,
#endif

#endif

	.is_secure       = rockchip_crypto_is_secure,
};

/*
 * Parse only the "clocks" property for the crypto clock IDs and use
 * rockchip_get_clk(), because the crypto node is always added to the
 * U-Boot dts and, when the kernel dtb is used:
 *
 *   1. the cru phandle differs between the U-Boot and kernel dtb;
 *   2. CONFIG_OF_SPL_REMOVE_PROPS removes the clock properties.
 */
static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int len, ret = -EINVAL;

	memset(priv, 0x00, sizeof(*priv));

	priv->reg = (fdt_addr_t)dev_read_addr_ptr(dev);
	if (priv->reg == FDT_ADDR_T_NONE)
		return -EINVAL;

	crypto_base = priv->reg;

	priv->secure = dev_read_bool(dev, "secure");
	priv->enabled = true;

#if !defined(CONFIG_SPL_BUILD)
	/* U-Boot proper leaves a secure-only crypto engine disabled */
	priv->enabled = !priv->secure;
#endif
	if (!priv->enabled)
		return 0;

	/* if there is no "clocks" property in the dts, just skip it */
	if (!dev_read_prop(dev, "clocks", &len)) {
		printf("Can't find \"clocks\" property\n");
		return 0;
	}

	priv->clocks = malloc(len);
	if (!priv->clocks)
		return -ENOMEM;

	/* each "clocks" entry is a <phandle id> pair */
	priv->nclocks = len / (2 * sizeof(u32));
	if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks,
			       priv->nclocks)) {
		printf("Can't read \"clocks\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	if (dev_read_prop(dev, "clock-frequency", &len)) {
		priv->frequencies = malloc(len);
		if (!priv->frequencies) {
			ret = -ENOMEM;
			goto exit;
		}
		priv->freq_nclocks = len / sizeof(u32);
		if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies,
				       priv->freq_nclocks)) {
			printf("Can't read \"clock-frequency\" property\n");
			ret = -EINVAL;
			goto exit;
		}
	}

	return 0;
exit:
	if (priv->clocks)
		free(priv->clocks);

	if (priv->frequencies)
		free(priv->frequencies);

	return ret;
}

static int rk_crypto_do_enable_clk(struct udevice *dev, int enable)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct clk clk;
	int i, ret;

	for (i = 0; i < priv->nclocks; i++) {
		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0) {
			printf("Failed to get clk index %d, ret=%d\n", i, ret);
			return ret;
		}

		if (enable)
			ret = clk_enable(&clk);
		else
			ret = clk_disable(&clk);
		if (ret < 0 && ret != -ENOSYS) {
			debug("Failed to %s clk(%ld): ret=%d\n",
			      enable ? "enable" : "disable", clk.id, ret);
			return ret;
		}
	}

	return 0;
}

static int rk_crypto_enable_clk(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

	crypto_base = priv->reg;

	return rk_crypto_do_enable_clk(dev, 1);
}

static int rk_crypto_disable_clk(struct udevice *dev)
{
	crypto_base = 0;

	return rk_crypto_do_enable_clk(dev, 0);
}

static int rk_crypto_set_clk(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct clk clk;
	int i, ret;

	/* use standard "assigned-clock-rates" props */
	if (dev_read_size(dev, "assigned-clock-rates") > 0)
		return clk_set_defaults(dev);

	/* use "clock-frequency" props */
	if (priv->freq_nclocks == 0)
		return 0;

	for (i = 0; i < priv->freq_nclocks; i++) {
		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0) {
			printf("Failed to get clk index %d, ret=%d\n", i, ret);
			return ret;
		}
		ret = clk_set_rate(&clk, priv->frequencies[i]);
		if (ret < 0) {
			printf("%s: Failed to set clk(%ld): ret=%d\n",
			       __func__, clk.id, ret);
			return ret;
		}
	}

	return 0;
}

static int rockchip_crypto_probe(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int ret = 0;

	if (!priv->enabled)
		return 0;

	ret = rk_crypto_set_clk(dev);
	if (ret)
		return ret;

	rk_crypto_enable_clk(dev);

	priv->hardware = rkce_hardware_alloc((void *)priv->reg);
	if (!priv->hardware) {
		ret = -ENOMEM;
		goto exit;
	}

	priv->capability = rockchip_crypto_capability(dev);
exit:
	rk_crypto_disable_clk(dev);

	return ret;
}

static const struct udevice_id rockchip_crypto_ids[] = {
	{
		.compatible = "rockchip,crypto-ce",
	},
	{ }
};

U_BOOT_DRIVER(rockchip_crypto_ce) = {
	.name		= "rockchip_crypto_ce",
	.id		= UCLASS_CRYPTO,
	.of_match	= rockchip_crypto_ids,
	.ops		= &rockchip_crypto_ops,
	.probe		= rockchip_crypto_probe,
	.ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv),
};