xref: /OK3568_Linux_fs/kernel/drivers/crypto/rockchip/rk_crypto_v2_skcipher.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Crypto acceleration support for Rockchip Crypto V2
4  *
5  * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd
6  *
7  * Author: Lin Jinhan <troy.lin@rock-chips.com>
8  *
9  * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers.
10  */
11 
12 #include <crypto/scatterwalk.h>
13 #include <linux/iopoll.h>
14 #include <linux/module.h>
15 #include <linux/platform_device.h>
16 
17 #include "rk_crypto_core.h"
18 #include "rk_crypto_utils.h"
19 #include "rk_crypto_skcipher_utils.h"
20 #include "rk_crypto_v2.h"
21 #include "rk_crypto_v2_reg.h"
22 
23 #define RK_POLL_PERIOD_US	100
24 #define RK_POLL_TIMEOUT_US	50000
25 
26 static const u32 cipher_algo2bc[] = {
27 	[CIPHER_ALGO_DES]      = CRYPTO_BC_DES,
28 	[CIPHER_ALGO_DES3_EDE] = CRYPTO_BC_TDES,
29 	[CIPHER_ALGO_AES]      = CRYPTO_BC_AES,
30 	[CIPHER_ALGO_SM4]      = CRYPTO_BC_SM4,
31 };
32 
33 static const u32 cipher_mode2bc[] = {
34 	[CIPHER_MODE_ECB] = CRYPTO_BC_ECB,
35 	[CIPHER_MODE_CBC] = CRYPTO_BC_CBC,
36 	[CIPHER_MODE_CFB] = CRYPTO_BC_CFB,
37 	[CIPHER_MODE_OFB] = CRYPTO_BC_OFB,
38 	[CIPHER_MODE_CTR] = CRYPTO_BC_CTR,
39 	[CIPHER_MODE_XTS] = CRYPTO_BC_XTS,
40 	[CIPHER_MODE_GCM] = CRYPTO_BC_GCM,
41 };
42 
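/*
 * DMA interrupt hook: acknowledge CRYPTO_DMA_INT_ST and, unless the status
 * is a clean "destination item done", dump the descriptor/DMA state and
 * record -EFAULT in rk_dev->err for the completion path.
 */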
43 static int rk_crypto_irq_handle(int irq, void *dev_id)
44 {
45 	struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
46 	u32 interrupt_status;
47 	struct rk_hw_crypto_v2_info *hw_info =
48 			(struct rk_hw_crypto_v2_info *)rk_dev->hw_info;
49 	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);
50 
51 	interrupt_status = CRYPTO_READ(rk_dev, CRYPTO_DMA_INT_ST);
52 	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_ST, interrupt_status);
53 
54 	interrupt_status &= CRYPTO_LOCKSTEP_MASK;
55 
56 	if (interrupt_status != CRYPTO_DST_ITEM_DONE_INT_ST) {
57 		dev_err(rk_dev->dev, "DMA desc = %p\n", hw_info->hw_desc.lli_head);
58 		dev_err(rk_dev->dev, "DMA addr_in = %08x\n",
59 			(u32)alg_ctx->addr_in);
60 		dev_err(rk_dev->dev, "DMA addr_out = %08x\n",
61 			(u32)alg_ctx->addr_out);
62 		dev_err(rk_dev->dev, "DMA count = %08x\n", alg_ctx->count);
63 		dev_err(rk_dev->dev, "DMA desc_dma = %08x\n",
64 			(u32)hw_info->hw_desc.lli_head_dma);
65 		dev_err(rk_dev->dev, "DMA Error status = %08x\n",
66 			interrupt_status);
67 		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_LLI_ADDR status = %08x\n",
68 			CRYPTO_READ(rk_dev, CRYPTO_DMA_LLI_ADDR));
69 		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_ST status = %08x\n",
70 			CRYPTO_READ(rk_dev, CRYPTO_DMA_ST));
71 		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_STATE status = %08x\n",
72 			CRYPTO_READ(rk_dev, CRYPTO_DMA_STATE));
73 		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_LLI_RADDR status = %08x\n",
74 			CRYPTO_READ(rk_dev, CRYPTO_DMA_LLI_RADDR));
75 		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_SRC_RADDR status = %08x\n",
76 			CRYPTO_READ(rk_dev, CRYPTO_DMA_SRC_RADDR));
77 		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_DST_RADDR status = %08x\n",
78 			CRYPTO_READ(rk_dev, CRYPTO_DMA_DST_RADDR));
79 		rk_dev->err = -EFAULT;
80 	}
81 
82 	return 0;
83 }
84 
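/*
 * Program 64-bit lengths (PC length here, AAD length below) as low/high
 * 32-bit register pairs on channel 0.
 */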
85 static inline void set_pc_len_reg(struct rk_crypto_dev *rk_dev, u64 pc_len)
86 {
87 	u32 chn_base = CRYPTO_CH0_PC_LEN_0;
88 
89 	CRYPTO_TRACE("PC length = %lu\n", (unsigned long)pc_len);
90 
91 	CRYPTO_WRITE(rk_dev, chn_base, pc_len & 0xffffffff);
92 	CRYPTO_WRITE(rk_dev, chn_base + 4, pc_len >> 32);
93 }
94 
95 static inline void set_aad_len_reg(struct rk_crypto_dev *rk_dev, u64 aad_len)
96 {
97 	u32 chn_base = CRYPTO_CH0_AAD_LEN_0;
98 
99 	CRYPTO_TRACE("AAD length = %lu\n", (unsigned long)aad_len);
100 
101 	CRYPTO_WRITE(rk_dev, chn_base, aad_len & 0xffffffff);
102 	CRYPTO_WRITE(rk_dev, chn_base + 4, aad_len >> 32);
103 }
104 
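/* Load the IV into the channel 0 IV registers and latch its length. */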
105 static void set_iv_reg(struct rk_crypto_dev *rk_dev, const u8 *iv, u32 iv_len)
106 {
107 	if (!iv || iv_len == 0)
108 		return;
109 
110 	CRYPTO_DUMPHEX("set iv", iv, iv_len);
111 
112 	rk_crypto_write_regs(rk_dev, CRYPTO_CH0_IV_0, iv, iv_len);
113 
114 	CRYPTO_WRITE(rk_dev, CRYPTO_CH0_IV_LEN_0, iv_len);
115 }
116 
117 static void write_key_reg(struct rk_crypto_dev *rk_dev, const u8 *key,
118 			  u32 key_len)
119 {
120 	rk_crypto_write_regs(rk_dev, CRYPTO_CH0_KEY_0, key, key_len);
121 }
122 
123 static void write_tkey_reg(struct rk_crypto_dev *rk_dev, const u8 *key,
124 			   u32 key_len)
125 {
126 	rk_crypto_write_regs(rk_dev, CRYPTO_CH4_KEY_0, key, key_len);
127 }
128 
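/*
 * Read back the AEAD tag: poll CRYPTO_TAG_VALID for channel 0 (up to
 * RK_POLL_TIMEOUT_US) and then copy tag_len bytes from CRYPTO_CH0_TAG_0.
 */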
129 static int get_tag_reg(struct rk_crypto_dev *rk_dev, u8 *tag, u32 tag_len)
130 {
131 	int ret;
132 	u32 reg_ctrl = 0;
133 
134 	CRYPTO_TRACE("tag_len = %u", tag_len);
135 
136 	if (tag_len > RK_MAX_TAG_SIZE)
137 		return -EINVAL;
138 
139 	ret = read_poll_timeout_atomic(CRYPTO_READ,
140 					reg_ctrl,
141 					reg_ctrl & CRYPTO_CH0_TAG_VALID,
142 					0,
143 					RK_POLL_TIMEOUT_US,
144 					false,
145 					rk_dev, CRYPTO_TAG_VALID);
146 	if (ret)
147 		goto exit;
148 
149 	rk_crypto_read_regs(rk_dev, CRYPTO_CH0_TAG_0, tag, tag_len);
150 exit:
151 	return ret;
152 }
153 
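/*
 * AES requests that must use the software fallback: AES-192 XTS is not
 * supported by crypto v2, and plain AES-192 may be forced to software
 * via use_soft_aes192.
 */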
154 static bool is_force_fallback(struct rk_crypto_algt *algt, uint32_t key_len)
155 {
156 	if (algt->algo != CIPHER_ALGO_AES)
157 		return false;
158 
159 	/* crypto v2 does not support XTS with AES-192 */
160 	if (algt->mode == CIPHER_MODE_XTS && key_len == AES_KEYSIZE_192 * 2)
161 		return true;
162 
163 	if (algt->use_soft_aes192 && key_len == AES_KEYSIZE_192)
164 		return true;
165 
166 	return false;
167 }
168 
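/* Only the stream-like modes (CFB/OFB/CTR) tolerate rounding the length up. */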
169 static bool is_calc_need_round_up(struct skcipher_request *req)
170 {
171 	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
172 	struct rk_crypto_algt *algt = rk_cipher_get_algt(cipher);
173 
174 	return algt->mode == CIPHER_MODE_CFB ||
175 	       algt->mode == CIPHER_MODE_OFB ||
176 	       algt->mode == CIPHER_MODE_CTR;
177 }
178 
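/*
 * Soft-reset the cipher core: mask DMA interrupts, assert CRYPTO_SW_CC_RESET
 * and wait for it to self-clear, then clear the block-cipher control register.
 */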
179 static void rk_cipher_reset(struct rk_crypto_dev *rk_dev)
180 {
181 	int ret;
182 	u32 tmp = 0, tmp_mask = 0;
183 	unsigned int poll_timeout_us = 1000;
184 
185 	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0x00);
186 
187 	tmp = CRYPTO_SW_CC_RESET;
188 	tmp_mask = tmp << CRYPTO_WRITE_MASK_SHIFT;
189 
190 	CRYPTO_WRITE(rk_dev, CRYPTO_RST_CTL, tmp | tmp_mask);
191 
192 	/* This is usually done in 20 clock cycles */
193 	ret = read_poll_timeout_atomic(CRYPTO_READ, tmp, !tmp, 0,
194 				       poll_timeout_us, false, rk_dev, CRYPTO_RST_CTL);
195 	if (ret)
196 		dev_err(rk_dev->dev, "cipher reset poll timeout %uus.",
197 			poll_timeout_us);
198 
199 	CRYPTO_WRITE(rk_dev, CRYPTO_BC_CTL, 0xffff0000);
200 }
201 
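/*
 * Completion callback: disable the block cipher and, on error, reset the
 * core and dump the first LLI descriptor before signalling the crypto layer.
 */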
202 static void rk_crypto_complete(struct crypto_async_request *base, int err)
203 {
204 	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
205 	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;
206 	struct rk_hw_crypto_v2_info *hw_info = ctx->rk_dev->hw_info;
207 	struct crypto_lli_desc *lli_desc = hw_info->hw_desc.lli_head;
208 
209 	CRYPTO_WRITE(ctx->rk_dev, CRYPTO_BC_CTL, 0xffff0000);
210 	if (err) {
211 		rk_cipher_reset(ctx->rk_dev);
212 		pr_err("aligned = %u, align_size = %u\n",
213 		       alg_ctx->aligned, alg_ctx->align_size);
214 		pr_err("total = %u, left = %u, count = %u\n",
215 		       alg_ctx->total, alg_ctx->left_bytes, alg_ctx->count);
216 		pr_err("lli->src     = %08x\n", lli_desc->src_addr);
217 		pr_err("lli->src_len = %08x\n", lli_desc->src_len);
218 		pr_err("lli->dst     = %08x\n", lli_desc->dst_addr);
219 		pr_err("lli->dst_len = %08x\n", lli_desc->dst_len);
220 		pr_err("lli->dma_ctl = %08x\n", lli_desc->dma_ctrl);
221 		pr_err("lli->usr_def = %08x\n", lli_desc->user_define);
222 		pr_err("lli->next    = %08x\n\n\n", lli_desc->next_addr);
223 	}
224 
225 	if (base->complete)
226 		base->complete(base, err);
227 }
228 
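/*
 * Common skcipher entry point: zero-length requests are completed (or
 * rejected) here, oversized XTS and forced AES-192 cases go to the software
 * fallback, and everything else sets up ctx->mode and the IV before being
 * queued via rk_skcipher_handle_req().
 */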
229 static int rk_cipher_crypt(struct skcipher_request *req, bool encrypt)
230 {
231 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
232 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
233 	struct rk_crypto_algt *algt = rk_cipher_get_algt(tfm);
234 
235 	CRYPTO_TRACE("%s total = %u",
236 		     encrypt ? "encrypt" : "decrypt", req->cryptlen);
237 
238 	if (!req->cryptlen) {
239 		if (algt->mode == CIPHER_MODE_ECB ||
240 		    algt->mode == CIPHER_MODE_CBC ||
241 		    algt->mode == CIPHER_MODE_CTR ||
242 		    algt->mode == CIPHER_MODE_CFB ||
243 		    algt->mode == CIPHER_MODE_OFB)
244 			return 0;
245 		else
246 			return -EINVAL;
247 	}
248 
249 	/* XTS data length must be >= chunksize */
250 	if (algt->mode == CIPHER_MODE_XTS) {
251 		if (req->cryptlen < crypto_skcipher_chunksize(tfm))
252 			return -EINVAL;
253 
254 		/* force the unaligned branch */
255 		ctx->algs_ctx.align_size = ctx->rk_dev->vir_max;
256 
257 		/* XTS can't be paused when using hardware crypto */
258 		if (req->cryptlen > ctx->rk_dev->vir_max)
259 			return rk_cipher_fallback(req, ctx, encrypt);
260 	}
261 
262 	if (is_force_fallback(algt, ctx->keylen))
263 		return rk_cipher_fallback(req, ctx, encrypt);
264 
265 	ctx->mode = cipher_algo2bc[algt->algo] |
266 		    cipher_mode2bc[algt->mode];
267 	if (!encrypt)
268 		ctx->mode |= CRYPTO_BC_DECRYPT;
269 
270 	if (algt->algo == CIPHER_ALGO_AES) {
271 		uint32_t key_factor;
272 
273 		/* The key length of XTS is twice the normal length */
274 		key_factor = algt->mode == CIPHER_MODE_XTS ? 2 : 1;
275 
276 		if (ctx->keylen == AES_KEYSIZE_128 * key_factor)
277 			ctx->mode |= CRYPTO_BC_128_bit_key;
278 		else if (ctx->keylen == AES_KEYSIZE_192 * key_factor)
279 			ctx->mode |= CRYPTO_BC_192_bit_key;
280 		else if (ctx->keylen == AES_KEYSIZE_256 * key_factor)
281 			ctx->mode |= CRYPTO_BC_256_bit_key;
282 	}
283 
284 	ctx->iv_len = crypto_skcipher_ivsize(tfm);
285 
286 	memset(ctx->iv, 0x00, sizeof(ctx->iv));
287 	memcpy(ctx->iv, req->iv, ctx->iv_len);
288 
289 	ctx->is_enc = encrypt;
290 
291 	CRYPTO_MSG("ctx->mode = %x\n", ctx->mode);
292 	return rk_skcipher_handle_req(ctx->rk_dev, req);
293 }
294 
295 static int rk_cipher_encrypt(struct skcipher_request *req)
296 {
297 	return rk_cipher_crypt(req, true);
298 }
299 
300 static int rk_cipher_decrypt(struct skcipher_request *req)
301 {
302 	return rk_cipher_crypt(req, false);
303 }
304 
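/*
 * Program the engine for one request: reset, load the key (split into data
 * and tweak halves for XTS), write the IV for non-ECB modes, then enable the
 * block cipher with the mode bits prepared in ctx->mode.
 */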
305 static int rk_ablk_hw_init(struct rk_crypto_dev *rk_dev, u32 algo, u32 mode)
306 {
307 	struct rk_cipher_ctx *ctx = rk_cipher_ctx_cast(rk_dev);
308 
309 	rk_cipher_reset(rk_dev);
310 
311 	CRYPTO_WRITE(rk_dev, CRYPTO_BC_CTL, 0x00010000);
312 
313 	if (mode == CIPHER_MODE_XTS) {
314 		uint32_t tmp_len = ctx->keylen / 2;
315 
316 		write_key_reg(ctx->rk_dev, ctx->key, tmp_len);
317 		write_tkey_reg(ctx->rk_dev, ctx->key + tmp_len, tmp_len);
318 	} else {
319 		write_key_reg(ctx->rk_dev, ctx->key, ctx->keylen);
320 	}
321 
322 	if (mode != CIPHER_MODE_ECB)
323 		set_iv_reg(rk_dev, ctx->iv, ctx->iv_len);
324 
325 	ctx->mode |= CRYPTO_BC_ENABLE;
326 
327 	CRYPTO_WRITE(rk_dev, CRYPTO_FIFO_CTL, 0x00030003);
328 
329 	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0x7f);
330 
331 	CRYPTO_WRITE(rk_dev, CRYPTO_BC_CTL, ctx->mode | CRYPTO_WRITE_MASK_ALL);
332 
333 	return 0;
334 }
335 
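/*
 * Build the LLI descriptor chain for the current chunk, patch the head/tail
 * descriptors (and prepend an AAD descriptor for AEAD requests), then point
 * CRYPTO_DMA_LLI_ADDR at the chain and kick CRYPTO_DMA_CTL.
 */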
336 static int crypto_dma_start(struct rk_crypto_dev *rk_dev, uint32_t flag)
337 {
338 	struct rk_hw_crypto_v2_info *hw_info =
339 			(struct rk_hw_crypto_v2_info *)rk_dev->hw_info;
340 	struct skcipher_request *req =
341 		skcipher_request_cast(rk_dev->async_req);
342 	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);
343 	struct crypto_lli_desc *lli_head, *lli_tail, *lli_aad;
344 	u32 calc_len = alg_ctx->count;
345 	u32 start_flag = CRYPTO_DMA_START;
346 	int ret;
347 
348 	if (alg_ctx->aligned)
349 		ret = rk_crypto_hw_desc_init(&hw_info->hw_desc,
350 					     alg_ctx->sg_src, alg_ctx->sg_dst, alg_ctx->count);
351 	else
352 		ret = rk_crypto_hw_desc_init(&hw_info->hw_desc,
353 					     &alg_ctx->sg_tmp, &alg_ctx->sg_tmp, alg_ctx->count);
354 	if (ret)
355 		return ret;
356 
357 	lli_head = hw_info->hw_desc.lli_head;
358 	lli_tail = hw_info->hw_desc.lli_tail;
359 	lli_aad  = hw_info->hw_desc.lli_aad;
360 
361 	/*
362 	 * When the data length is not aligned, addr_vir is used for the
363 	 * calculation, so crypto v2 can round the data length up to chunk_size.
364 	 */
365 	if (!alg_ctx->is_aead && is_calc_need_round_up(req))
366 		calc_len = round_up(calc_len, alg_ctx->chunk_size);
367 
368 	CRYPTO_TRACE("calc_len = %u, cryptlen = %u, assoclen = %u, is_aead = %d",
369 		     calc_len, alg_ctx->total, alg_ctx->assoclen, alg_ctx->is_aead);
370 
371 	lli_head->user_define = LLI_USER_STRING_START | LLI_USER_CIPHER_START;
372 
373 	lli_tail->dma_ctrl     = LLI_DMA_CTRL_DST_DONE | LLI_DMA_CTRL_LAST;
374 	lli_tail->user_define |= LLI_USER_STRING_LAST;
375 	lli_tail->src_len     += (calc_len - alg_ctx->count);
376 	lli_tail->dst_len     += (calc_len - alg_ctx->count);
377 
378 	if (alg_ctx->is_aead) {
379 		lli_aad->src_addr    = alg_ctx->addr_aad_in;
380 		lli_aad->src_len     = alg_ctx->assoclen;
381 		lli_aad->user_define = LLI_USER_CIPHER_START |
382 				       LLI_USER_STRING_START |
383 				       LLI_USER_STRING_LAST |
384 				       LLI_USER_STRING_AAD;
385 		lli_aad->next_addr   = hw_info->hw_desc.lli_head_dma;
386 
387 		/* clear cipher start */
388 		lli_head->user_define &= (~((u32)LLI_USER_CIPHER_START));
389 
390 		set_pc_len_reg(rk_dev, alg_ctx->total);
391 		set_aad_len_reg(rk_dev, alg_ctx->assoclen);
392 	}
393 
394 	rk_crypto_dump_hw_desc(&hw_info->hw_desc);
395 
396 	dma_wmb();
397 
398 	if (alg_ctx->is_aead)
399 		CRYPTO_WRITE(rk_dev, CRYPTO_DMA_LLI_ADDR, hw_info->hw_desc.lli_aad_dma);
400 	else
401 		CRYPTO_WRITE(rk_dev, CRYPTO_DMA_LLI_ADDR, hw_info->hw_desc.lli_head_dma);
402 
403 	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_CTL, start_flag | (start_flag << WRITE_MASK));
404 
405 	return 0;
406 }
407 
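/*
 * skcipher tfm init: claim the crypto engine, wire up the v2 cipher ops and
 * allocate an optional software fallback tfm when the algorithm asks for one.
 */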
408 static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
409 {
410 	struct rk_crypto_algt *algt = rk_cipher_get_algt(tfm);
411 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
412 	const char *alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
413 	struct rk_crypto_dev *rk_dev = algt->rk_dev;
414 	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;
415 
416 	CRYPTO_TRACE();
417 
418 	memset(ctx, 0x00, sizeof(*ctx));
419 
420 	if (!rk_dev->request_crypto)
421 		return -EFAULT;
422 
423 	rk_dev->request_crypto(rk_dev, alg_name);
424 
425 	/* always treated as not aligned for crypto v2 cipher */
426 	alg_ctx->align_size     = 64;
427 	alg_ctx->chunk_size     = crypto_skcipher_chunksize(tfm);
428 
429 	alg_ctx->ops.start      = rk_ablk_start;
430 	alg_ctx->ops.update     = rk_ablk_rx;
431 	alg_ctx->ops.complete   = rk_crypto_complete;
432 	alg_ctx->ops.irq_handle = rk_crypto_irq_handle;
433 
434 	alg_ctx->ops.hw_init      = rk_ablk_hw_init;
435 	alg_ctx->ops.hw_dma_start = crypto_dma_start;
436 	alg_ctx->ops.hw_write_iv  = set_iv_reg;
437 
438 	ctx->rk_dev = rk_dev;
439 
440 	if (algt->alg.crypto.base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
441 		CRYPTO_MSG("alloc fallback tfm, name = %s", alg_name);
442 		ctx->fallback_tfm = crypto_alloc_skcipher(alg_name, 0,
443 							  CRYPTO_ALG_ASYNC |
444 							  CRYPTO_ALG_NEED_FALLBACK);
445 		if (IS_ERR(ctx->fallback_tfm)) {
446 			CRYPTO_MSG("Could not load fallback driver %s : %ld.\n",
447 				   alg_name, PTR_ERR(ctx->fallback_tfm));
448 			ctx->fallback_tfm = NULL;
449 		}
450 	}
451 
452 	return 0;
453 }
454 
455 static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
456 {
457 	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
458 	const char *alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
459 
460 	CRYPTO_TRACE();
461 
462 	if (ctx->fallback_tfm) {
463 		CRYPTO_MSG("free fallback tfm");
464 		crypto_free_skcipher(ctx->fallback_tfm);
465 	}
466 
467 	ctx->rk_dev->release_crypto(ctx->rk_dev, alg_name);
468 }
469 
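/*
 * AEAD tfm init: same wiring as the skcipher path plus the tag read-back op,
 * with the request size extended so a fallback sub-request can be carried
 * when a fallback tfm is available.
 */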
470 static int rk_aead_init_tfm(struct crypto_aead *tfm)
471 {
472 	struct aead_alg *alg = crypto_aead_alg(tfm);
473 	struct rk_crypto_algt *algt =
474 		container_of(alg, struct rk_crypto_algt, alg.aead);
475 	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(&tfm->base);
476 	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
477 	struct rk_crypto_dev *rk_dev = algt->rk_dev;
478 	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;
479 
480 	CRYPTO_TRACE();
481 
482 	if (!rk_dev->request_crypto)
483 		return -EFAULT;
484 
485 	rk_dev->request_crypto(rk_dev, alg_name);
486 
487 	alg_ctx->align_size     = 64;
488 	alg_ctx->chunk_size     = crypto_aead_chunksize(tfm);
489 
490 	alg_ctx->ops.start      = rk_aead_start;
491 	alg_ctx->ops.update     = rk_ablk_rx;
492 	alg_ctx->ops.complete   = rk_crypto_complete;
493 	alg_ctx->ops.irq_handle = rk_crypto_irq_handle;
494 
495 	alg_ctx->ops.hw_init       = rk_ablk_hw_init;
496 	alg_ctx->ops.hw_dma_start  = crypto_dma_start;
497 	alg_ctx->ops.hw_write_iv   = set_iv_reg;
498 	alg_ctx->ops.hw_get_result = get_tag_reg;
499 
500 	ctx->rk_dev      = rk_dev;
501 	alg_ctx->is_aead = 1;
502 
503 	if (algt->alg.crypto.base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
504 		CRYPTO_MSG("alloc fallback tfm, name = %s", alg_name);
505 		ctx->fallback_aead =
506 			crypto_alloc_aead(alg_name, 0,
507 					  CRYPTO_ALG_ASYNC |
508 					  CRYPTO_ALG_NEED_FALLBACK);
509 		if (IS_ERR(ctx->fallback_aead)) {
510 			dev_err(rk_dev->dev,
511 				"Load fallback driver %s err: %ld.\n",
512 				alg_name, PTR_ERR(ctx->fallback_aead));
513 			ctx->fallback_aead = NULL;
514 			crypto_aead_set_reqsize(tfm, sizeof(struct aead_request));
515 		} else {
516 			crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
517 						crypto_aead_reqsize(ctx->fallback_aead));
518 		}
519 	}
520 
521 	return 0;
522 }
523 
524 static void rk_aead_exit_tfm(struct crypto_aead *tfm)
525 {
526 	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(&tfm->base);
527 
528 	CRYPTO_TRACE();
529 
530 	if (ctx->fallback_aead) {
531 		CRYPTO_MSG("free fallback tfm");
532 		crypto_free_aead(ctx->fallback_aead);
533 	}
534 
535 	ctx->rk_dev->release_crypto(ctx->rk_dev, crypto_tfm_alg_name(&tfm->base));
536 }
537 
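/*
 * GCM entry point: requests the DMA engine cannot express (no AAD, empty
 * payload, too many scatterlist entries, oversized unaligned buffers or
 * forced-fallback keys) are handed to the software fallback; the rest set
 * up ctx->mode and the IV and are queued via rk_aead_handle_req().
 */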
538 static int rk_aead_crypt(struct aead_request *req, bool encrypt)
539 {
540 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
541 	struct rk_cipher_ctx *ctx = crypto_aead_ctx(tfm);
542 	struct rk_crypto_algt *algt = rk_aead_get_algt(tfm);
543 	struct scatterlist *sg_src, *sg_dst;
544 	struct scatterlist src[2], dst[2];
545 	u64 data_len;
546 	bool aligned;
547 	int ret = -EINVAL;
548 
549 	CRYPTO_TRACE("%s cryptlen = %u, assoclen = %u",
550 		     encrypt ? "encrypt" : "decrypt",
551 		     req->cryptlen, req->assoclen);
552 
553 	data_len = encrypt ? req->cryptlen : (req->cryptlen - crypto_aead_authsize(tfm));
554 
555 	if (req->assoclen == 0 ||
556 	    req->cryptlen == 0 ||
557 	    data_len == 0 ||
558 	    is_force_fallback(algt, ctx->keylen))
559 		return rk_aead_fallback(req, ctx, encrypt);
560 
561 	/* point sg_src and sg_dst past the assoc data */
562 	sg_src = scatterwalk_ffwd(src, req->src, req->assoclen);
563 	sg_dst = (req->src == req->dst) ? sg_src : scatterwalk_ffwd(dst, req->dst, req->assoclen);
564 
565 	aligned = rk_crypto_check_align(sg_src, sg_nents_for_len(sg_src, data_len),
566 					sg_dst, sg_nents_for_len(sg_dst, data_len),
567 					64);
568 
569 	if (sg_nents_for_len(sg_src, data_len) > RK_DEFAULT_LLI_CNT ||
570 	    sg_nents_for_len(sg_dst, data_len) > RK_DEFAULT_LLI_CNT)
571 		return rk_aead_fallback(req, ctx, encrypt);
572 
573 	if (!aligned) {
574 		if (req->assoclen > ctx->rk_dev->aad_max ||
575 		    data_len > ctx->rk_dev->vir_max)
576 			return rk_aead_fallback(req, ctx, encrypt);
577 	}
578 
579 	ctx->mode = cipher_algo2bc[algt->algo] |
580 		    cipher_mode2bc[algt->mode];
581 	if (!encrypt)
582 		ctx->mode |= CRYPTO_BC_DECRYPT;
583 
584 	if (algt->algo == CIPHER_ALGO_AES) {
585 		if (ctx->keylen == AES_KEYSIZE_128)
586 			ctx->mode |= CRYPTO_BC_128_bit_key;
587 		else if (ctx->keylen == AES_KEYSIZE_192)
588 			ctx->mode |= CRYPTO_BC_192_bit_key;
589 		else if (ctx->keylen == AES_KEYSIZE_256)
590 			ctx->mode |= CRYPTO_BC_256_bit_key;
591 	}
592 
593 	ctx->iv_len = crypto_aead_ivsize(tfm);
594 
595 	memset(ctx->iv, 0x00, sizeof(ctx->iv));
596 	memcpy(ctx->iv, req->iv, ctx->iv_len);
597 
598 	ctx->is_enc = encrypt;
599 
600 	CRYPTO_MSG("ctx->mode = %x\n", ctx->mode);
601 	ret = rk_aead_handle_req(ctx->rk_dev, req);
602 
603 	return ret;
604 }
605 
606 static int rk_aead_encrypt(struct aead_request *req)
607 {
608 	return rk_aead_crypt(req, true);
609 }
610 
611 static int rk_aead_decrypt(struct aead_request *req)
612 {
613 	return rk_aead_crypt(req, false);
614 }
615 
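/*
 * Algorithm descriptors exported to the core driver for registration with
 * the kernel crypto API under the generic names below. A consumer would
 * reach them through the usual crypto API calls; an illustrative sketch
 * (not part of this driver) might look like:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		crypto_free_skcipher(tfm);
 */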
616 struct rk_crypto_algt rk_v2_ecb_sm4_alg =
617 	RK_CIPHER_ALGO_INIT(SM4, ECB, ecb(sm4), ecb-sm4-rk);
618 
619 struct rk_crypto_algt rk_v2_cbc_sm4_alg =
620 	RK_CIPHER_ALGO_INIT(SM4, CBC, cbc(sm4), cbc-sm4-rk);
621 
622 struct rk_crypto_algt rk_v2_xts_sm4_alg =
623 	RK_CIPHER_ALGO_XTS_INIT(SM4, xts(sm4), xts-sm4-rk);
624 
625 struct rk_crypto_algt rk_v2_cfb_sm4_alg =
626 	RK_CIPHER_ALGO_INIT(SM4, CFB, cfb(sm4), cfb-sm4-rk);
627 
628 struct rk_crypto_algt rk_v2_ofb_sm4_alg =
629 	RK_CIPHER_ALGO_INIT(SM4, OFB, ofb(sm4), ofb-sm4-rk);
630 
631 struct rk_crypto_algt rk_v2_ctr_sm4_alg =
632 	RK_CIPHER_ALGO_INIT(SM4, CTR, ctr(sm4), ctr-sm4-rk);
633 
634 struct rk_crypto_algt rk_v2_gcm_sm4_alg =
635 	RK_AEAD_ALGO_INIT(SM4, GCM, gcm(sm4), gcm-sm4-rk);
636 
637 struct rk_crypto_algt rk_v2_ecb_aes_alg =
638 	RK_CIPHER_ALGO_INIT(AES, ECB, ecb(aes), ecb-aes-rk);
639 
640 struct rk_crypto_algt rk_v2_cbc_aes_alg =
641 	RK_CIPHER_ALGO_INIT(AES, CBC, cbc(aes), cbc-aes-rk);
642 
643 struct rk_crypto_algt rk_v2_xts_aes_alg =
644 	RK_CIPHER_ALGO_XTS_INIT(AES, xts(aes), xts-aes-rk);
645 
646 struct rk_crypto_algt rk_v2_cfb_aes_alg =
647 	RK_CIPHER_ALGO_INIT(AES, CFB, cfb(aes), cfb-aes-rk);
648 
649 struct rk_crypto_algt rk_v2_ofb_aes_alg =
650 	RK_CIPHER_ALGO_INIT(AES, OFB, ofb(aes), ofb-aes-rk);
651 
652 struct rk_crypto_algt rk_v2_ctr_aes_alg =
653 	RK_CIPHER_ALGO_INIT(AES, CTR, ctr(aes), ctr-aes-rk);
654 
655 struct rk_crypto_algt rk_v2_gcm_aes_alg =
656 	RK_AEAD_ALGO_INIT(AES, GCM, gcm(aes), gcm-aes-rk);
657 
658 struct rk_crypto_algt rk_v2_ecb_des_alg =
659 	RK_CIPHER_ALGO_INIT(DES, ECB, ecb(des), ecb-des-rk);
660 
661 struct rk_crypto_algt rk_v2_cbc_des_alg =
662 	RK_CIPHER_ALGO_INIT(DES, CBC, cbc(des), cbc-des-rk);
663 
664 struct rk_crypto_algt rk_v2_cfb_des_alg =
665 	RK_CIPHER_ALGO_INIT(DES, CFB, cfb(des), cfb-des-rk);
666 
667 struct rk_crypto_algt rk_v2_ofb_des_alg =
668 	RK_CIPHER_ALGO_INIT(DES, OFB, ofb(des), ofb-des-rk);
669 
670 struct rk_crypto_algt rk_v2_ecb_des3_ede_alg =
671 	RK_CIPHER_ALGO_INIT(DES3_EDE, ECB, ecb(des3_ede), ecb-des3_ede-rk);
672 
673 struct rk_crypto_algt rk_v2_cbc_des3_ede_alg =
674 	RK_CIPHER_ALGO_INIT(DES3_EDE, CBC, cbc(des3_ede), cbc-des3_ede-rk);
675 
676 struct rk_crypto_algt rk_v2_cfb_des3_ede_alg =
677 	RK_CIPHER_ALGO_INIT(DES3_EDE, CFB, cfb(des3_ede), cfb-des3_ede-rk);
678 
679 struct rk_crypto_algt rk_v2_ofb_des3_ede_alg =
680 	RK_CIPHER_ALGO_INIT(DES3_EDE, OFB, ofb(des3_ede), ofb-des3_ede-rk);
681 
682