// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2025 Rockchip Electronics Co., Ltd
 */

#include <clk.h>
#include <clk-uclass.h>
#include <common.h>
#include <crypto.h>
#include <dm.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <asm/arch/clock.h>
#include <rockchip/crypto_ecc.h>
#include <rockchip/crypto_v2_pka.h>
#include <rockchip/rkce_core.h>

fdt_addr_t crypto_base;

#define ROUNDUP(size, alignment)	round_up(size, alignment)

#define RKCE_HASH_TIMEOUT_MS	1000
#define RKCE_SYMM_TIMEOUT_MS	1000

struct rkce_sha_context {
	u32				length;
	struct rkce_hash_td_ctrl	ctrl;
	struct rkce_hash_td		*td;
	struct rkce_hash_td_buf		*td_buf;
};

struct rkce_cipher_context {
	struct rkce_symm_td		*td;
	struct rkce_symm_td		*td_aad;
	struct rkce_symm_td_buf		*td_buf;
};

struct rockchip_crypto_priv {
	fdt_addr_t			reg;
	u32				frequency;
	char				*clocks;
	u32				*frequencies;
	u32				nclocks;
	u32				freq_nclocks;
	u32				capability;

	void				*hardware;
	struct rkce_sha_context		*hash_ctx;
};

struct rockchip_map {
	u32				crypto;
	u32				rkce;
};

static const struct rockchip_map rk_hash_map[] = {
	{CRYPTO_SM3,         RKCE_HASH_ALGO_SM3},
	{CRYPTO_MD5,         RKCE_HASH_ALGO_MD5},
	{CRYPTO_SHA1,        RKCE_HASH_ALGO_SHA1},
	{CRYPTO_SHA256,      RKCE_HASH_ALGO_SHA256},
	{CRYPTO_SHA512,      RKCE_HASH_ALGO_SHA512},
};

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
static const struct rockchip_map rk_hmac_map[] = {
	{CRYPTO_HMAC_MD5,    RKCE_HASH_ALGO_MD5},
	{CRYPTO_HMAC_SHA1,   RKCE_HASH_ALGO_SHA1},
	{CRYPTO_HMAC_SHA256, RKCE_HASH_ALGO_SHA256},
	{CRYPTO_HMAC_SHA512, RKCE_HASH_ALGO_SHA512},
	{CRYPTO_HMAC_SM3,    RKCE_HASH_ALGO_SM3},
};
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
static const struct rockchip_map rk_cipher_map[] = {
	{CRYPTO_AES,         RKCE_SYMM_ALGO_AES},
	{CRYPTO_DES,         RKCE_SYMM_ALGO_TDES},
	{CRYPTO_SM4,         RKCE_SYMM_ALGO_SM4},
};
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
static const struct rockchip_map rk_rsa_map[] = {
	{CRYPTO_RSA512,       RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA1024,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA2048,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA3072,      RKCE_ASYM_ALGO_RSA},
	{CRYPTO_RSA4096,      RKCE_ASYM_ALGO_RSA},
};
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
static const struct rockchip_map rk_ec_map[] = {
	{CRYPTO_SM2,          RKCE_ASYM_ALGO_SM2},
	{CRYPTO_ECC_192R1,    RKCE_ASYM_ALGO_ECC_P192},
	{CRYPTO_ECC_224R1,    RKCE_ASYM_ALGO_ECC_P224},
	{CRYPTO_ECC_256R1,    RKCE_ASYM_ALGO_ECC_P256},
};
#endif

static int rk_crypto_enable_clk(struct udevice *dev);
static int rk_crypto_disable_clk(struct udevice *dev);

static void crypto_flush_cacheline(ulong addr, ulong size)
{
	ulong alignment = CONFIG_SYS_CACHELINE_SIZE;
	ulong aligned_input, aligned_len;

	if (!addr || !size)
		return;

	/* The dcache must be flushed before the crypto DMA fetches this region */
	aligned_input = round_down(addr, alignment);
	aligned_len = round_up(size + (addr - aligned_input), alignment);
	flush_cache(aligned_input, aligned_len);
}

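/*
 * Map a U-Boot CRYPTO_* algorithm ID to the RKCE hardware algorithm ID by
 * scanning every algorithm table compiled into this driver. Returns 0 when
 * the algorithm is not supported.
 */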
static u32 rk_get_cemode(u32 algo)
{
	u32 i, j;
	struct {
		const struct rockchip_map	*map;
		u32				num;
	} map_tbl[] = {
		{rk_hash_map, ARRAY_SIZE(rk_hash_map)},
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
		{rk_hmac_map, ARRAY_SIZE(rk_hmac_map)},
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
		{rk_cipher_map, ARRAY_SIZE(rk_cipher_map)},
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
		{rk_rsa_map, ARRAY_SIZE(rk_rsa_map)},
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
		{rk_ec_map, ARRAY_SIZE(rk_ec_map)},
#endif
	};

	for (i = 0; i < ARRAY_SIZE(map_tbl); i++) {
		const struct rockchip_map *map = map_tbl[i].map;
		u32 num = map_tbl[i].num;

		for (j = 0; j < num; j++) {
			if (map[j].crypto == algo)
				return map[j].rkce;
		}
	}

	return 0;
}

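/*
 * Collect a CRYPTO_* capability bitmask for one algorithm table by asking
 * the hardware layer which entries this crypto engine instance supports.
 */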
static u32 rk_load_map(struct rockchip_crypto_priv *priv, u32 algo_type,
		       const struct rockchip_map *map, u32 num)
{
	u32 i;
	u32 capability = 0;

	for (i = 0; i < num; i++) {
		if (rkce_hw_algo_valid(priv->hardware, algo_type, map[i].rkce, 0))
			capability |= map[i].crypto;
	}

	return capability;
}

static u32 rockchip_crypto_capability(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 cap = 0;

	if (priv->capability)
		return priv->capability;

	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_HASH,
			   rk_hash_map, ARRAY_SIZE(rk_hash_map));

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_HMAC,
			   rk_hmac_map, ARRAY_SIZE(rk_hmac_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_CIPHER,
			   rk_cipher_map, ARRAY_SIZE(rk_cipher_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_ASYM, rk_rsa_map,
			   ARRAY_SIZE(rk_rsa_map));
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
	cap |= rk_load_map(priv, RKCE_ALGO_TYPE_ASYM, rk_ec_map,
			   ARRAY_SIZE(rk_ec_map));
#endif

	return cap;
}

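/*
 * The task descriptor (td) and its buffer are fetched by the crypto engine
 * via DMA, so they are allocated from CMA rather than the normal heap.
 */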
static void *rkce_sha_ctx_alloc(void)
{
	struct rkce_sha_context *hw_ctx;

	hw_ctx = malloc(sizeof(*hw_ctx));
	if (!hw_ctx)
		return NULL;

	memset(hw_ctx, 0x00, sizeof(*hw_ctx));

	hw_ctx->td = rkce_cma_alloc(sizeof(struct rkce_hash_td));
	if (!hw_ctx->td)
		goto error;

	memset(hw_ctx->td, 0x00, sizeof(struct rkce_hash_td));

	hw_ctx->td_buf = rkce_cma_alloc(sizeof(struct rkce_hash_td_buf));
	if (!hw_ctx->td_buf)
		goto error;

	memset(hw_ctx->td_buf, 0x00, sizeof(struct rkce_hash_td_buf));

	return hw_ctx;
error:
	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);

	return NULL;
}

static void rkce_sha_ctx_free(struct rkce_sha_context *hw_ctx)
{
	if (!hw_ctx)
		return;

	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);
}

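/* Common init path for both plain hash and HMAC sessions. */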
static int rk_sha_init(struct udevice *dev, sha_context *ctx,
		       u8 *key, u32 key_len, bool is_hmac)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_sha_context *hash_ctx = NULL;
	u32 ce_algo = 0;
	int ret = 0;

	if ((ctx->algo & priv->capability) == 0)
		return -ENOSYS;

	if (priv->hash_ctx)
		return -EFAULT;

	rkce_soft_reset(priv->hardware, RKCE_RESET_HASH);

	hash_ctx = rkce_sha_ctx_alloc();
	if (!hash_ctx)
		return -ENOMEM;

	ret = rkce_init_hash_td(hash_ctx->td, hash_ctx->td_buf);
	if (ret)
		goto exit;

	ce_algo = rk_get_cemode(ctx->algo);

	hash_ctx->ctrl.td_type        = RKCE_TD_TYPE_HASH;
	hash_ctx->ctrl.hw_pad_en      = 1;
	hash_ctx->ctrl.first_pkg      = 1;
	hash_ctx->ctrl.last_pkg       = 0;
	hash_ctx->ctrl.hash_algo      = ce_algo;
	hash_ctx->ctrl.hmac_en        = is_hmac;
	hash_ctx->ctrl.is_preemptible = 0;
	hash_ctx->ctrl.int_en         = 1;

	if (is_hmac) {
		if (key_len > 64) {
			ret = -EINVAL;
			goto exit;
		}

		memcpy(hash_ctx->td_buf->key, key, key_len);
	}

	priv->hash_ctx = hash_ctx;
exit:
	if (ret) {
		rkce_sha_ctx_free(hash_ctx);
		priv->hash_ctx = NULL;
	}

	return ret;
}

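/*
 * Queue one data package to the hash engine. Each call pushes a single
 * task descriptor synchronously; the final (empty) package sets last_pkg
 * so the engine applies the hardware padding and writes out the digest.
 */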
static int rk_sha_update(struct udevice *dev, u32 *input, u32 len, bool is_last)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_sha_context *hash_ctx;
	struct rkce_hash_td *td;
	int ret = 0;

	if (!priv->hash_ctx)
		return -EINVAL;

	if (!is_last && (!input || len == 0))
		return -EINVAL;

	hash_ctx = priv->hash_ctx;
	td = hash_ctx->td;

	td->ctrl = hash_ctx->ctrl;
	memset(td->sg, 0x00, sizeof(td->sg));

	if (hash_ctx->ctrl.first_pkg == 1)
		hash_ctx->ctrl.first_pkg = 0;

	if (is_last) {
		td->ctrl.last_pkg = 1;
	} else {
#ifdef CONFIG_ARM64
		td->sg[0].src_addr_h = rkce_cma_virt2phys(input) >> 32;
#endif
		td->sg[0].src_addr_l = rkce_cma_virt2phys(input) & 0xffffffff;
		td->sg[0].src_size   = len;
		hash_ctx->length += len;
		crypto_flush_cacheline((ulong)input, len);
	}

	rk_crypto_enable_clk(dev);

	crypto_flush_cacheline((ulong)hash_ctx->td, sizeof(*hash_ctx->td));
	crypto_flush_cacheline((ulong)hash_ctx->td_buf, sizeof(*hash_ctx->td_buf));

	ret = rkce_push_td_sync(priv->hardware, td, RKCE_HASH_TIMEOUT_MS);
	if (ret) {
		rkce_sha_ctx_free(hash_ctx);
		priv->hash_ctx = NULL;
	}

	rk_crypto_disable_clk(dev);

	return ret;
}

static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
{
	return rk_sha_init(dev, ctx, NULL, 0, false);
}

static int rockchip_crypto_sha_update(struct udevice *dev, u32 *input, u32 len)
{
	return rk_sha_update(dev, input, len, false);
}

static int rockchip_crypto_sha_final(struct udevice *dev, sha_context *ctx, u8 *output)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_sha_context *hash_ctx = priv->hash_ctx;
	u32 nbits;
	int ret;

	if (!priv->hash_ctx)
		return -EINVAL;

	nbits = crypto_algo_nbits(ctx->algo);

	if (hash_ctx->length != ctx->length) {
		printf("total length (0x%08x) != expected length (0x%08x)!\n",
		       hash_ctx->length, ctx->length);
		ret = -EIO;
		goto exit;
	}

	ret = rk_sha_update(dev, NULL, 0, true);
	if (ret == 0)
		memcpy(output, hash_ctx->td_buf->hash, BITS2BYTE(nbits));

exit:
	/* rk_sha_update() already frees the context on failure */
	if (priv->hash_ctx) {
		rkce_sha_ctx_free(priv->hash_ctx);
		priv->hash_ctx = NULL;
	}

	return ret;
}

#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
static int rockchip_crypto_hmac_init(struct udevice *dev, sha_context *ctx, u8 *key, u32 key_len)
{
	return rk_sha_init(dev, ctx, key, key_len, true);
}

static int rockchip_crypto_hmac_update(struct udevice *dev, u32 *input, u32 len)
{
	return rockchip_crypto_sha_update(dev, input, len);
}

static int rockchip_crypto_hmac_final(struct udevice *dev, sha_context *ctx, u8 *output)
{
	return rockchip_crypto_sha_final(dev, ctx, output);
}
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)

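/*
 * Write the CCM B0 block: keep the L field from the flags byte, place the
 * big-endian message length in the trailing bytes and copy in the nonce.
 * The layout follows CRYPTO_ccm128_setiv() from OpenSSL.
 */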
static int hw_crypto_ccm128_setiv(u8 *iv_buf, u8 *nonce, u32 nlen, u32 mlen)
{
	u32 L = iv_buf[0] & 7;	/* the L parameter */

	if (nlen < (14 - L))
		return -1;	/* nonce is too short */

	if (sizeof(mlen) == 8 && L >= 3) {
		iv_buf[8]  = mlen >> (56 % (sizeof(mlen) * 8));
		iv_buf[9]  = mlen >> (48 % (sizeof(mlen) * 8));
		iv_buf[10] = mlen >> (40 % (sizeof(mlen) * 8));
		iv_buf[11] = mlen >> (32 % (sizeof(mlen) * 8));
	}

	iv_buf[12] = mlen >> 24;
	iv_buf[13] = mlen >> 16;
	iv_buf[14] = mlen >> 8;
	iv_buf[15] = mlen;

	iv_buf[0] &= ~0x40;	/* clear aad flag */
	memcpy(&iv_buf[1], nonce, 14 - L);

	return 0;
}

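/*
 * Encode the CCM AAD length prefix: 2 bytes for short AAD, 0xff 0xfe plus
 * a 32-bit length, or 0xff 0xff plus a 64-bit length (cf. RFC 3610).
 */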
static void hw_get_ccm_aad_padding(u32 aad_len, u8 *padding, u32 *padding_size)
{
	u32 i = 0;

	if (aad_len == 0) {
		*padding_size = 0;
		return;
	}

	if (aad_len < (0x10000 - 0x100)) {
		i = 2;
	} else if (sizeof(aad_len) == 8 &&
		   aad_len >= (size_t)1 << (32 % (sizeof(aad_len) * 8))) {
		i = 10;
	} else {
		i = 6;
	}

	if (i == 2) {
		padding[0] = aad_len >> 8;
		padding[1] = aad_len;
	} else if (i == 10) {
		padding[0] = 0xFF;
		padding[1] = 0xFF;
		padding[2] = aad_len >> (56 % (sizeof(aad_len) * 8));
		padding[3] = aad_len >> (48 % (sizeof(aad_len) * 8));
		padding[4] = aad_len >> (40 % (sizeof(aad_len) * 8));
		padding[5] = aad_len >> (32 % (sizeof(aad_len) * 8));
		padding[6] = aad_len >> 24;
		padding[7] = aad_len >> 16;
		padding[8] = aad_len >> 8;
		padding[9] = aad_len;
	} else {
		padding[0] = 0xFF;
		padding[1] = 0xFE;
		padding[2] = aad_len >> 24;
		padding[3] = aad_len >> 16;
		padding[4] = aad_len >> 8;
		padding[5] = aad_len;
	}

	*padding_size = i;
}

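/*
 * Finalize the B0 block for the AAD pass: fold the tag size into the
 * flags byte, insert the message length and set the Adata flag when AAD
 * is present, all without disturbing the nonce bytes.
 */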
static int hw_compose_ccm_aad_iv(u8 *aad_iv, u32 data_len,
				 u32 aad_len, u32 tag_size)
{
	u32 L;		/* the L parameter */
	u8 nonce[AES_BLOCK_SIZE];

	L = aad_iv[0] & 7;
	aad_iv[0] |= ((u8)(((tag_size - 2) / 2) & 7) << 3);

	if (sizeof(data_len) == 8 && L >= 3) {
		aad_iv[8]  = data_len >> (56 % (sizeof(data_len) * 8));
		aad_iv[9]  = data_len >> (48 % (sizeof(data_len) * 8));
		aad_iv[10] = data_len >> (40 % (sizeof(data_len) * 8));
		aad_iv[11] = data_len >> (32 % (sizeof(data_len) * 8));
	}

	/* save nonce */
	memcpy(nonce, &aad_iv[1], 14 - L);

	aad_iv[12] = data_len >> 24;
	aad_iv[13] = data_len >> 16;
	aad_iv[14] = data_len >> 8;
	aad_iv[15] = data_len;

	/* restore nonce */
	memcpy(&aad_iv[1], nonce, 14 - L);

	aad_iv[0] &= ~0x40;	/* clear Adata flag */

	if (aad_len)
		aad_iv[0] |= 0x40;	/* set Adata flag */

	return 0;
}

static void rkce_destroy_ccm_aad(u8 *new_aad)
{
	rkce_cma_free(new_aad);
}

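/*
 * The engine expects the CCM AAD stream as B0 || encoded aad_len || aad,
 * zero-padded to a whole AES block, so build that layout in a CMA buffer
 * that the hardware can DMA.
 */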
static int rkce_build_ccm_aad(const u8 *aad, u32 aad_len, u32 data_len,
			      u8 *iv, u32 iv_len,
			      u8 **new_aad, u32 *new_aad_len,
			      u8 *new_iv, u32 *new_iv_len)
{
	int ret = -RKCE_INVAL;
	u32 L;
	u8 nonce[AES_BLOCK_SIZE];
	u8 pad[AES_BLOCK_SIZE];
	u32 pad_size = 0;
	u32 tag_len = AES_BLOCK_SIZE;
	u8 *aad_tmp = NULL;
	u32 aad_tmp_len = 0;

	memset(nonce, 0x00, sizeof(nonce));

	L = 15 - iv_len;
	nonce[0] = (L - 1) & 7;
	ret = hw_crypto_ccm128_setiv(nonce, (u8 *)iv, iv_len, 0);
	if (ret)
		return ret;

	memcpy(new_iv, nonce, sizeof(nonce));
	*new_iv_len = sizeof(nonce);

	memset(pad, 0x00, sizeof(pad));
	hw_get_ccm_aad_padding(aad_len, pad, &pad_size);

	aad_tmp_len = aad_len + AES_BLOCK_SIZE + pad_size;
	aad_tmp_len = ROUNDUP(aad_tmp_len, AES_BLOCK_SIZE);

	aad_tmp = rkce_cma_alloc(aad_tmp_len);
	if (!aad_tmp) {
		ret = -RKCE_NOMEM;
		goto exit;
	}

	/* clear last block */
	memset(aad_tmp + aad_tmp_len - AES_BLOCK_SIZE, 0x00, AES_BLOCK_SIZE);
	memcpy(aad_tmp, nonce, sizeof(nonce));
	hw_compose_ccm_aad_iv(aad_tmp, data_len, aad_len, tag_len);
	memcpy(aad_tmp + AES_BLOCK_SIZE, pad, pad_size);

	memcpy(aad_tmp + AES_BLOCK_SIZE + pad_size, aad, aad_len);

	*new_aad     = aad_tmp;
	*new_aad_len = aad_tmp_len;

exit:
	return ret;
}

static void *rkce_cipher_ctx_alloc(void)
{
	struct rkce_cipher_context *hw_ctx;

	hw_ctx = malloc(sizeof(*hw_ctx));
	if (!hw_ctx)
		return NULL;

	/* zero the pointers so a partial allocation can be freed safely */
	memset(hw_ctx, 0x00, sizeof(*hw_ctx));

	hw_ctx->td = rkce_cma_alloc(sizeof(struct rkce_symm_td));
	if (!hw_ctx->td)
		goto error;

	memset(hw_ctx->td, 0x00, sizeof(struct rkce_symm_td));

	hw_ctx->td_aad = rkce_cma_alloc(sizeof(struct rkce_symm_td));
	if (!hw_ctx->td_aad)
		goto error;

	memset(hw_ctx->td_aad, 0x00, sizeof(struct rkce_symm_td));

	hw_ctx->td_buf = rkce_cma_alloc(sizeof(struct rkce_symm_td_buf));
	if (!hw_ctx->td_buf)
		goto error;

	memset(hw_ctx->td_buf, 0x00, sizeof(struct rkce_symm_td_buf));

	return hw_ctx;
error:
	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_aad);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);

	return NULL;
}

static void rkce_cipher_ctx_free(struct rkce_cipher_context *hw_ctx)
{
	if (!hw_ctx)
		return;

	rkce_cma_free(hw_ctx->td);
	rkce_cma_free(hw_ctx->td_aad);
	rkce_cma_free(hw_ctx->td_buf);
	free(hw_ctx);
}

static void crypto_invalidate_cacheline(ulong addr, ulong size)
{
	ulong alignment = CONFIG_SYS_CACHELINE_SIZE;
	ulong aligned_input, aligned_len;

	if (!addr || !size)
		return;

	/* The dcache must be invalidated after the crypto DMA writes this region */
	aligned_input = round_down(addr, alignment);
	aligned_len = round_up(size + (addr - aligned_input), alignment);
	invalidate_dcache_range(aligned_input, aligned_input + aligned_len);
}

static const struct rockchip_map rk_cipher_algo_map[] = {
	{RK_MODE_ECB,     RKCE_SYMM_MODE_ECB},
	{RK_MODE_CBC,     RKCE_SYMM_MODE_CBC},
	{RK_MODE_CTS,     RKCE_SYMM_MODE_CTS},
	{RK_MODE_CTR,     RKCE_SYMM_MODE_CTR},
	{RK_MODE_CFB,     RKCE_SYMM_MODE_CFB},
	{RK_MODE_OFB,     RKCE_SYMM_MODE_OFB},
	{RK_MODE_XTS,     RKCE_SYMM_MODE_XTS},
	{RK_MODE_CCM,     RKCE_SYMM_MODE_CCM},
	{RK_MODE_GCM,     RKCE_SYMM_MODE_GCM},
	{RK_MODE_CMAC,    RKCE_SYMM_MODE_CMAC},
	{RK_MODE_CBC_MAC, RKCE_SYMM_MODE_CBC_MAC},
};

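/* Split a U-Boot cipher request into the RKCE algorithm and block-mode IDs. */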
static int rk_get_cipher_cemode(u32 algo, u32 mode, u32 *ce_algo, u32 *ce_mode)
{
	u32 i;

	switch (algo) {
	case CRYPTO_DES:
		*ce_algo = RKCE_SYMM_ALGO_TDES;
		break;
	case CRYPTO_AES:
		*ce_algo = RKCE_SYMM_ALGO_AES;
		break;
	case CRYPTO_SM4:
		*ce_algo = RKCE_SYMM_ALGO_SM4;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algo_map); i++) {
		if (mode == rk_cipher_algo_map[i].crypto) {
			*ce_mode = rk_cipher_algo_map[i].rkce;
			return 0;
		}
	}

	return -EINVAL;
}

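/* Map an AES key length to the td key_size encoding; other algorithms use 0. */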
u32 rk_get_td_keysize(u32 ce_algo, u32 ce_mode, u32 key_len)
{
	u32 key_size = 0;

	if (ce_algo == RKCE_SYMM_ALGO_AES) {
		if (key_len == AES_KEYSIZE_128)
			key_size = RKCE_KEY_AES_128;
		else if (key_len == AES_KEYSIZE_192)
			key_size = RKCE_KEY_AES_192;
		else if (key_len == AES_KEYSIZE_256)
			key_size = RKCE_KEY_AES_256;
	}

	return key_size;
}

int rk_set_symm_td_buf_key(struct rkce_symm_td_buf *td_buf,
			   u32 ce_algo, u32 ce_mode, cipher_context *ctx)
{
	memset(td_buf->key1, 0x00, sizeof(td_buf->key1));
	memset(td_buf->key2, 0x00, sizeof(td_buf->key2));

	if (ce_mode == RKCE_SYMM_MODE_XTS) {
		memcpy(td_buf->key1, ctx->key, ctx->key_len);
		memcpy(td_buf->key2, ctx->twk_key, ctx->key_len);
	} else {
		memcpy(td_buf->key1, ctx->key, ctx->key_len);
	}

	if (ctx->key_len == DES_KEYSIZE * 2 &&
	    (ce_algo == RKCE_SYMM_ALGO_DES || ce_algo == RKCE_SYMM_ALGO_TDES))
		memcpy(td_buf->key1 + DES_KEYSIZE * 2, td_buf->key1, DES_KEYSIZE);

	return 0;
}

int rk_set_symm_td_sg(struct rkce_symm_td *td,
		      const u8 *in, u32 in_len, u8 *out, u32 out_len)
{
	memset(td->sg, 0x00, sizeof(td->sg));

#ifdef CONFIG_ARM64
	td->sg[0].src_addr_h = rkce_cma_virt2phys(in) >> 32;
#endif
	td->sg[0].src_addr_l = rkce_cma_virt2phys(in) & 0xffffffff;
	td->sg[0].src_size   = in_len;

	if (out && out_len) {
#ifdef CONFIG_ARM64
		td->sg[0].dst_addr_h = rkce_cma_virt2phys(out) >> 32;
#endif
		td->sg[0].dst_addr_l = rkce_cma_virt2phys(out) & 0xffffffff;
		td->sg[0].dst_size   = out_len;
	}

	td->next_task = 0;

	return 0;
}

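/*
 * One-shot symmetric operation: build a single task descriptor covering
 * the whole buffer. CCM and GCM first push a separate AAD descriptor
 * (is_aad set) so the engine absorbs the additional data before the
 * payload pass; the tag, if requested, is copied from the td buffer.
 */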
static int rk_crypto_cipher(struct udevice *dev, cipher_context *ctx,
			    const u8 *in, u8 *out, u32 len, bool enc,
			    const u8 *aad, u32 aad_len, u8 *tag)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rkce_cipher_context *hw_ctx = NULL;
	u32 ce_algo = 0, ce_mode = 0;
	int ret = 0;

	rkce_soft_reset(priv->hardware, RKCE_RESET_SYMM);

	ret = rk_get_cipher_cemode(ctx->algo, ctx->mode, &ce_algo, &ce_mode);
	if (ret)
		return ret;

	hw_ctx = rkce_cipher_ctx_alloc();
	if (!hw_ctx)
		return -ENOMEM;

	rkce_init_symm_td(hw_ctx->td, hw_ctx->td_buf);

	hw_ctx->td->ctrl.td_type   = RKCE_TD_TYPE_SYMM;
	hw_ctx->td->ctrl.is_dec    = !enc;
	hw_ctx->td->ctrl.symm_algo = ce_algo;
	hw_ctx->td->ctrl.symm_mode = ce_mode;
	hw_ctx->td->ctrl.key_size  = rk_get_td_keysize(ce_algo, ce_mode, ctx->key_len);
	hw_ctx->td->ctrl.first_pkg = 1;
	hw_ctx->td->ctrl.last_pkg  = 1;
	hw_ctx->td->ctrl.int_en    = 1;

	memcpy(hw_ctx->td_buf->iv, ctx->iv, ctx->iv_len);
	hw_ctx->td->ctrl.iv_len    = ctx->iv_len;

	ret = rk_set_symm_td_buf_key(hw_ctx->td_buf, ce_algo, ce_mode, ctx);
	if (ret)
		goto exit;

	ret = rk_set_symm_td_sg(hw_ctx->td, in, len, out, len);
	if (ret)
		goto exit;

	if (ce_mode == RKCE_SYMM_MODE_CCM) {
		u8 *new_aad = NULL;
		u32 new_aad_len = 0, new_iv_len = 0;

		rkce_init_symm_td(hw_ctx->td_aad, hw_ctx->td_buf);

		ret = rkce_build_ccm_aad(aad, aad_len, len,
					 hw_ctx->td_buf->iv, ctx->iv_len,
					 &new_aad, &new_aad_len,
					 hw_ctx->td_buf->iv, &new_iv_len);
		if (ret)
			goto exit;

		ret = rk_set_symm_td_sg(hw_ctx->td_aad, new_aad, new_aad_len, NULL, 0);
		if (ret)
			goto exit;

		hw_ctx->td->ctrl.iv_len = new_iv_len;

		hw_ctx->td_buf->gcm_len.aad_len_l = new_aad_len;

		hw_ctx->td_aad->ctrl = hw_ctx->td->ctrl;
		hw_ctx->td_aad->ctrl.is_aad = 1;

		crypto_flush_cacheline((ulong)hw_ctx->td_aad, sizeof(*hw_ctx->td_aad));
		crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
		crypto_flush_cacheline((ulong)new_aad, new_aad_len);

		rk_crypto_enable_clk(dev);

		ret = rkce_push_td_sync(priv->hardware, hw_ctx->td_aad, RKCE_SYMM_TIMEOUT_MS);

		rk_crypto_disable_clk(dev);

		rkce_destroy_ccm_aad(new_aad);

		if (ret) {
			printf("CCM calc aad data failed.\n");
			goto exit;
		}
	} else if (ce_mode == RKCE_SYMM_MODE_GCM) {
		rkce_init_symm_td(hw_ctx->td_aad, hw_ctx->td_buf);

		ret = rk_set_symm_td_sg(hw_ctx->td_aad, aad, aad_len, NULL, 0);
		if (ret)
			goto exit;

		hw_ctx->td_buf->gcm_len.aad_len_l = aad_len;
		hw_ctx->td_buf->gcm_len.pc_len_l = len;

		hw_ctx->td_aad->ctrl = hw_ctx->td->ctrl;
		hw_ctx->td_aad->ctrl.is_aad = 1;

		crypto_flush_cacheline((ulong)hw_ctx->td_aad, sizeof(*hw_ctx->td_aad));
		crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
		crypto_flush_cacheline((ulong)aad, aad_len);

		rk_crypto_enable_clk(dev);

		ret = rkce_push_td_sync(priv->hardware, hw_ctx->td_aad, RKCE_SYMM_TIMEOUT_MS);

		rk_crypto_disable_clk(dev);
		if (ret) {
			printf("GCM calc aad data failed.\n");
			goto exit;
		}
	}

	crypto_flush_cacheline((ulong)hw_ctx->td, sizeof(*hw_ctx->td));
	crypto_flush_cacheline((ulong)hw_ctx->td_buf, sizeof(*hw_ctx->td_buf));
	crypto_flush_cacheline((ulong)in, len);
	crypto_invalidate_cacheline((ulong)out, len);

	rk_crypto_enable_clk(dev);

	ret = rkce_push_td_sync(priv->hardware, hw_ctx->td, RKCE_SYMM_TIMEOUT_MS);

	crypto_invalidate_cacheline((ulong)out, len);

	rk_crypto_disable_clk(dev);

	if (tag)
		memcpy(tag, hw_ctx->td_buf->tag, sizeof(hw_ctx->td_buf->tag));
exit:
	rkce_cipher_ctx_free(hw_ctx);

	return ret;
}

static int rockchip_crypto_cipher(struct udevice *dev, cipher_context *ctx,
				  const u8 *in, u8 *out, u32 len, bool enc)
{
	return rk_crypto_cipher(dev, ctx, in, out, len, enc, NULL, 0, NULL);
}

static int rockchip_crypto_mac(struct udevice *dev, cipher_context *ctx,
			       const u8 *in, u32 len, u8 *tag)
{
	return rk_crypto_cipher(dev, ctx, in, NULL, len, true, NULL, 0, tag);
}

static int rockchip_crypto_ae(struct udevice *dev, cipher_context *ctx,
			      const u8 *in, u32 len, const u8 *aad, u32 aad_len,
			      u8 *out, u8 *tag)
{
	return rk_crypto_cipher(dev, ctx, in, out, len, true, aad, aad_len, tag);
}

#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
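/*
 * RSA is handled by the PKA as a plain modular exponentiation; the caller
 * gets the recovered message back and performs the padding check itself.
 */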
static int rockchip_crypto_rsa_verify(struct udevice *dev, rsa_key *ctx,
				      u8 *sign, u8 *output)
{
	struct mpa_num *mpa_m = NULL, *mpa_e = NULL, *mpa_n = NULL;
	struct mpa_num *mpa_c = NULL, *mpa_result = NULL;
	u32 n_bits, n_words;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_RSA512 &&
	    ctx->algo != CRYPTO_RSA1024 &&
	    ctx->algo != CRYPTO_RSA2048 &&
	    ctx->algo != CRYPTO_RSA3072 &&
	    ctx->algo != CRYPTO_RSA4096)
		return -EINVAL;

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	ret = rk_mpa_alloc(&mpa_m, sign, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_e, ctx->e, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&mpa_n, ctx->n, n_words);
	if (ret)
		goto exit;

	if (ctx->c) {
		ret = rk_mpa_alloc(&mpa_c, ctx->c, n_words);
		if (ret)
			goto exit;
	}

	ret = rk_mpa_alloc(&mpa_result, NULL, n_words);
	if (ret)
		goto exit;

	rk_crypto_enable_clk(dev);
	ret = rk_exptmod_np(mpa_m, mpa_e, mpa_n, mpa_c, mpa_result);
	if (!ret)
		memcpy(output, mpa_result->d, BITS2BYTE(n_bits));
	rk_crypto_disable_clk(dev);

exit:
	rk_mpa_free(&mpa_m);
	rk_mpa_free(&mpa_e);
	rk_mpa_free(&mpa_n);
	rk_mpa_free(&mpa_c);
	rk_mpa_free(&mpa_result);

	return ret;
}
#endif

#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
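/*
 * Verify an ECDSA/SM2 signature: rebuild the public key point and the
 * (r, s) pair as big numbers and hand them to the ECC backend.
 */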
static int rockchip_crypto_ec_verify(struct udevice *dev, ec_key *ctx,
				     u8 *hash, u32 hash_len, u8 *sign)
{
	struct rk_ecp_point point_P, point_sign;
	u32 n_bits, n_words;
	int ret;

	if (!ctx)
		return -EINVAL;

	if (ctx->algo != CRYPTO_SM2 &&
	    ctx->algo != CRYPTO_ECC_192R1 &&
	    ctx->algo != CRYPTO_ECC_224R1 &&
	    ctx->algo != CRYPTO_ECC_256R1)
		return -EINVAL;

	memset(&point_P, 0x00, sizeof(point_P));
	memset(&point_sign, 0x00, sizeof(point_sign));

	n_bits = crypto_algo_nbits(ctx->algo);
	n_words = BITS2WORD(n_bits);

	ret = rk_mpa_alloc(&point_P.x, ctx->x, n_words);
	ret |= rk_mpa_alloc(&point_P.y, ctx->y, n_words);
	if (ret)
		goto exit;

	ret = rk_mpa_alloc(&point_sign.x, sign, n_words);
	ret |= rk_mpa_alloc(&point_sign.y, sign + WORD2BYTE(n_words), n_words);
	if (ret)
		goto exit;

	rk_crypto_enable_clk(dev);
	ret = rockchip_ecc_verify(ctx->algo, hash, hash_len, &point_P, &point_sign);
	rk_crypto_disable_clk(dev);
exit:
	rk_mpa_free(&point_P.x);
	rk_mpa_free(&point_P.y);
	rk_mpa_free(&point_sign.x);
	rk_mpa_free(&point_sign.y);

	return ret;
}
#endif

static const struct dm_crypto_ops rockchip_crypto_ops = {
	.capability   = rockchip_crypto_capability,
	.sha_init     = rockchip_crypto_sha_init,
	.sha_update   = rockchip_crypto_sha_update,
	.sha_final    = rockchip_crypto_sha_final,
#if CONFIG_IS_ENABLED(ROCKCHIP_HMAC)
	.hmac_init    = rockchip_crypto_hmac_init,
	.hmac_update  = rockchip_crypto_hmac_update,
	.hmac_final   = rockchip_crypto_hmac_final,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)
	.rsa_verify   = rockchip_crypto_rsa_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_EC)
	.ec_verify    = rockchip_crypto_ec_verify,
#endif
#if CONFIG_IS_ENABLED(ROCKCHIP_CIPHER)
	.cipher_crypt = rockchip_crypto_cipher,
	.cipher_mac   = rockchip_crypto_mac,
	.cipher_ae    = rockchip_crypto_ae,
#endif
};

/*
 * Only parse the crypto clock IDs from "clocks" and resolve them via
 * rockchip_get_clk(), because the crypto node is always added to the
 * U-Boot dts. When a kernel dtb is used:
 *
 *   1. the cru phandle differs between the U-Boot and kernel dtb;
 *   2. CONFIG_OF_SPL_REMOVE_PROPS removes the clock properties.
 */
static int rockchip_crypto_ofdata_to_platdata(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int len, ret = -EINVAL;

	memset(priv, 0x00, sizeof(*priv));

	priv->reg = dev_read_addr(dev);
	if (priv->reg == FDT_ADDR_T_NONE)
		return -EINVAL;

	crypto_base = priv->reg;

	/* if there is no "clocks" property in the dts, just skip it */
	if (!dev_read_prop(dev, "clocks", &len)) {
		printf("Can't find \"clocks\" property\n");
		return 0;
	}

	priv->clocks = malloc(len);
	if (!priv->clocks)
		return -ENOMEM;

	priv->nclocks = len / (2 * sizeof(u32));
	if (dev_read_u32_array(dev, "clocks", (u32 *)priv->clocks,
			       priv->nclocks)) {
		printf("Can't read \"clocks\" property\n");
		ret = -EINVAL;
		goto exit;
	}

	if (dev_read_prop(dev, "clock-frequency", &len)) {
		priv->frequencies = malloc(len);
		if (!priv->frequencies) {
			ret = -ENOMEM;
			goto exit;
		}
		priv->freq_nclocks = len / sizeof(u32);
		if (dev_read_u32_array(dev, "clock-frequency", priv->frequencies,
				       priv->freq_nclocks)) {
			printf("Can't read \"clock-frequency\" property\n");
			ret = -EINVAL;
			goto exit;
		}
	}

	return 0;
exit:
	if (priv->clocks)
		free(priv->clocks);

	if (priv->frequencies)
		free(priv->frequencies);

	return ret;
}

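/*
 * The controller clocks are only held enabled around each hardware
 * operation, so every td push is bracketed by enable/disable calls.
 */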
static int rk_crypto_do_enable_clk(struct udevice *dev, int enable)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct clk clk;
	int i, ret;

	for (i = 0; i < priv->nclocks; i++) {
		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0) {
			printf("Failed to get clk index %d, ret=%d\n", i, ret);
			return ret;
		}

		if (enable)
			ret = clk_enable(&clk);
		else
			ret = clk_disable(&clk);
		if (ret < 0 && ret != -ENOSYS) {
			printf("Failed to %s clk(%ld): ret=%d\n",
			       enable ? "enable" : "disable", clk.id, ret);
			return ret;
		}
	}

	return 0;
}

static int rk_crypto_enable_clk(struct udevice *dev)
{
	return rk_crypto_do_enable_clk(dev, 1);
}

static int rk_crypto_disable_clk(struct udevice *dev)
{
	return rk_crypto_do_enable_clk(dev, 0);
}

static int rk_crypto_set_clk(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct clk clk;
	int i, ret;

	/* use standard "assigned-clock-rates" props */
	if (dev_read_size(dev, "assigned-clock-rates") > 0)
		return clk_set_defaults(dev);

	/* use "clock-frequency" props */
	if (priv->freq_nclocks == 0)
		return 0;

	for (i = 0; i < priv->freq_nclocks; i++) {
		ret = clk_get_by_index(dev, i, &clk);
		if (ret < 0) {
			printf("Failed to get clk index %d, ret=%d\n", i, ret);
			return ret;
		}
		ret = clk_set_rate(&clk, priv->frequencies[i]);
		if (ret < 0) {
			printf("%s: Failed to set clk(%ld): ret=%d\n",
			       __func__, clk.id, ret);
			return ret;
		}
	}

	return 0;
}

static int rockchip_crypto_probe(struct udevice *dev)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	int ret = 0;

	ret = rk_crypto_set_clk(dev);
	if (ret)
		return ret;

	rk_crypto_enable_clk(dev);

	priv->hardware = rkce_hardware_alloc((void *)priv->reg);
	if (!priv->hardware) {
		ret = -ENOMEM;
		goto exit;
	}

	priv->capability = rockchip_crypto_capability(dev);
exit:
	rk_crypto_disable_clk(dev);

	return ret;
}

static const struct udevice_id rockchip_crypto_ids[] = {
	{
		.compatible = "rockchip,crypto-ce",
	},
	{ }
};

U_BOOT_DRIVER(rockchip_crypto_ce) = {
	.name		= "rockchip_crypto_ce",
	.id		= UCLASS_CRYPTO,
	.of_match	= rockchip_crypto_ids,
	.ops		= &rockchip_crypto_ops,
	.probe		= rockchip_crypto_probe,
	.ofdata_to_platdata = rockchip_crypto_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct rockchip_crypto_priv),
};