// SPDX-License-Identifier: GPL-2.0
/*
 * Crypto acceleration support for Rockchip Crypto V3
 *
 * Copyright (c) 2022, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Lin Jinhan <troy.lin@rock-chips.com>
 *
 */

#include <crypto/scatterwalk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "rk_crypto_core.h"
#include "rk_crypto_utils.h"
#include "rk_crypto_skcipher_utils.h"
#include "rk_crypto_v3.h"
#include "rk_crypto_v3_reg.h"

#define RK_POLL_PERIOD_US	100
#define RK_POLL_TIMEOUT_US	50000

static const u32 cipher_algo2bc[] = {
	[CIPHER_ALGO_DES] = CRYPTO_BC_DES,
	[CIPHER_ALGO_DES3_EDE] = CRYPTO_BC_TDES,
	[CIPHER_ALGO_AES] = CRYPTO_BC_AES,
	[CIPHER_ALGO_SM4] = CRYPTO_BC_SM4,
};

static const u32 cipher_mode2bc[] = {
	[CIPHER_MODE_ECB] = CRYPTO_BC_ECB,
	[CIPHER_MODE_CBC] = CRYPTO_BC_CBC,
	[CIPHER_MODE_CFB] = CRYPTO_BC_CFB,
	[CIPHER_MODE_OFB] = CRYPTO_BC_OFB,
	[CIPHER_MODE_CTR] = CRYPTO_BC_CTR,
	[CIPHER_MODE_XTS] = CRYPTO_BC_XTS,
	[CIPHER_MODE_GCM] = CRYPTO_BC_GCM,
};

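/*
 * Interrupt handler hook called from the core driver: acknowledge the DMA
 * interrupt status and, if anything other than "destination item done" is
 * reported, dump the DMA descriptor and register state and flag the request
 * with -EFAULT so the completion path can recover.
 */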
static int rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;
	struct rk_hw_crypto_v3_info *hw_info =
		(struct rk_hw_crypto_v3_info *)rk_dev->hw_info;
	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);

	interrupt_status = CRYPTO_READ(rk_dev, CRYPTO_DMA_INT_ST);
	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_ST, interrupt_status);

	interrupt_status &= CRYPTO_LOCKSTEP_MASK;

	if (interrupt_status != CRYPTO_DST_ITEM_DONE_INT_ST) {
		dev_err(rk_dev->dev, "DMA desc = %p\n", hw_info->hw_desc.lli_head);
		dev_err(rk_dev->dev, "DMA addr_in = %08x\n",
			(u32)alg_ctx->addr_in);
		dev_err(rk_dev->dev, "DMA addr_out = %08x\n",
			(u32)alg_ctx->addr_out);
		dev_err(rk_dev->dev, "DMA count = %08x\n", alg_ctx->count);
		dev_err(rk_dev->dev, "DMA desc_dma = %08x\n",
			(u32)hw_info->hw_desc.lli_head_dma);
		dev_err(rk_dev->dev, "DMA Error status = %08x\n",
			interrupt_status);
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_LLI_ADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_LLI_ADDR));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_ST status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_ST));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_STATE status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_STATE));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_LLI_RADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_LLI_RADDR));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_SRC_RADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_SRC_RADDR));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_DST_RADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_DST_RADDR));
		rk_dev->err = -EFAULT;
	}

	return 0;
}

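/*
 * The PC (plain/cipher text) and AAD lengths are 64-bit values split across
 * two consecutive 32-bit channel 0 registers; the low word is written first,
 * followed by the high word.
 */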
static inline void set_pc_len_reg(struct rk_crypto_dev *rk_dev, u64 pc_len)
{
	u32 chn_base = CRYPTO_CH0_PC_LEN_0;

	CRYPTO_TRACE("PC length = %lu\n", (unsigned long)pc_len);

	CRYPTO_WRITE(rk_dev, chn_base, pc_len & 0xffffffff);
	CRYPTO_WRITE(rk_dev, chn_base + 4, pc_len >> 32);
}

static inline void set_aad_len_reg(struct rk_crypto_dev *rk_dev, u64 aad_len)
{
	u32 chn_base = CRYPTO_CH0_AAD_LEN_0;

	CRYPTO_TRACE("AAD length = %lu\n", (unsigned long)aad_len);

	CRYPTO_WRITE(rk_dev, chn_base, aad_len & 0xffffffff);
	CRYPTO_WRITE(rk_dev, chn_base + 4, aad_len >> 32);
}

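/*
 * Load the IV into the channel 0 IV registers and latch its length.
 * Cipher keys go to the channel 0 key registers; for XTS the tweak key is
 * written to the channel 4 key registers via write_tkey_reg().
 */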
static void set_iv_reg(struct rk_crypto_dev *rk_dev, const u8 *iv, u32 iv_len)
{
	if (!iv || iv_len == 0)
		return;

	CRYPTO_DUMPHEX("set iv", iv, iv_len);

	rk_crypto_write_regs(rk_dev, CRYPTO_CH0_IV_0, iv, iv_len);

	CRYPTO_WRITE(rk_dev, CRYPTO_CH0_IV_LEN_0, iv_len);
}

static void write_key_reg(struct rk_crypto_dev *rk_dev, const u8 *key,
			  u32 key_len)
{
	rk_crypto_write_regs(rk_dev, CRYPTO_CH0_KEY_0, key, key_len);
}

static void write_tkey_reg(struct rk_crypto_dev *rk_dev, const u8 *key,
			   u32 key_len)
{
	rk_crypto_write_regs(rk_dev, CRYPTO_CH4_KEY_0, key, key_len);
}

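/*
 * Fetch the AEAD authentication tag: poll CRYPTO_TAG_VALID until the
 * channel 0 tag is reported valid (or RK_POLL_TIMEOUT_US expires), then
 * copy tag_len bytes out of the tag registers.
 */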
static int get_tag_reg(struct rk_crypto_dev *rk_dev, u8 *tag, u32 tag_len)
{
	int ret;
	u32 reg_ctrl = 0;

	CRYPTO_TRACE("tag_len = %u", tag_len);

	if (tag_len > RK_MAX_TAG_SIZE)
		return -EINVAL;

	ret = read_poll_timeout_atomic(CRYPTO_READ,
				       reg_ctrl,
				       reg_ctrl & CRYPTO_CH0_TAG_VALID,
				       0,
				       RK_POLL_TIMEOUT_US,
				       false,
				       rk_dev, CRYPTO_TAG_VALID);
	if (ret)
		goto exit;

	rk_crypto_read_regs(rk_dev, CRYPTO_CH0_TAG_0, tag, tag_len);
exit:
	return ret;
}

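/*
 * Decide whether an AES request must be handed to the software fallback:
 * XTS with a 192-bit key (384 bits of key material) is not supported by the
 * hardware, and plain AES-192 may be forced to software via use_soft_aes192.
 */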
static bool is_force_fallback(struct rk_crypto_algt *algt, uint32_t key_len)
{
	if (algt->algo != CIPHER_ALGO_AES)
		return false;

	/* crypto v2 does not support XTS with AES-192 */
	if (algt->mode == CIPHER_MODE_XTS && key_len == AES_KEYSIZE_192 * 2)
		return true;

	if (algt->use_soft_aes192 && key_len == AES_KEYSIZE_192)
		return true;

	return false;
}

static bool is_calc_need_round_up(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct rk_crypto_algt *algt = rk_cipher_get_algt(cipher);

	return algt->mode == CIPHER_MODE_CFB ||
	       algt->mode == CIPHER_MODE_OFB ||
	       algt->mode == CIPHER_MODE_CTR;
}

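/*
 * Soft-reset the symmetric crypto engine: mask the DMA interrupts, assert
 * CRYPTO_SW_CC_RESET through the write-enable mask in CRYPTO_RST_CTL, wait
 * for the hardware to clear the bit and finally disable the block cipher.
 */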
static void rk_cipher_reset(struct rk_crypto_dev *rk_dev)
{
	int ret;
	u32 tmp = 0, tmp_mask = 0;
	unsigned int poll_timeout_us = 1000;

	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0x00);

	tmp = CRYPTO_SW_CC_RESET;
	tmp_mask = tmp << CRYPTO_WRITE_MASK_SHIFT;

	CRYPTO_WRITE(rk_dev, CRYPTO_RST_CTL, tmp | tmp_mask);

	/* This is usually done in 20 clock cycles */
	ret = read_poll_timeout_atomic(CRYPTO_READ, tmp, !tmp, 0,
				       poll_timeout_us, false, rk_dev, CRYPTO_RST_CTL);
	if (ret)
		dev_err(rk_dev->dev, "cipher reset poll timeout %uus.",
			poll_timeout_us);

	CRYPTO_WRITE(rk_dev, CRYPTO_BC_CTL, 0xffff0000);
}

static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;
	struct rk_hw_crypto_v3_info *hw_info = ctx->rk_dev->hw_info;
	struct crypto_lli_desc *lli_desc = hw_info->hw_desc.lli_head;

	CRYPTO_WRITE(ctx->rk_dev, CRYPTO_BC_CTL, 0xffff0000);
	if (err) {
		rk_cipher_reset(ctx->rk_dev);
		pr_err("aligned = %u, align_size = %u\n",
		       alg_ctx->aligned, alg_ctx->align_size);
		pr_err("total = %u, left = %u, count = %u\n",
		       alg_ctx->total, alg_ctx->left_bytes, alg_ctx->count);
		pr_err("lli->src = %08x\n", lli_desc->src_addr);
		pr_err("lli->src_len = %08x\n", lli_desc->src_len);
		pr_err("lli->dst = %08x\n", lli_desc->dst_addr);
		pr_err("lli->dst_len = %08x\n", lli_desc->dst_len);
		pr_err("lli->dma_ctl = %08x\n", lli_desc->dma_ctrl);
		pr_err("lli->usr_def = %08x\n", lli_desc->user_define);
		pr_err("lli->next = %08x\n\n\n", lli_desc->next_addr);
	}

	if (base->complete)
		base->complete(base, err);
}

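/*
 * Common skcipher entry point for both directions.  Zero-length requests
 * succeed trivially for the stream-like modes, XTS requests shorter than the
 * chunk size are rejected, and requests the hardware cannot handle (oversized
 * XTS, AES-192 corner cases) are routed to the software fallback before the
 * BC control word and IV are prepared and the request is queued.
 */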
static int rk_cipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_algt *algt = rk_cipher_get_algt(tfm);

	CRYPTO_TRACE("%s total = %u",
		     encrypt ? "encrypt" : "decrypt", req->cryptlen);

	if (!req->cryptlen) {
		if (algt->mode == CIPHER_MODE_ECB ||
		    algt->mode == CIPHER_MODE_CBC ||
		    algt->mode == CIPHER_MODE_CTR ||
		    algt->mode == CIPHER_MODE_CFB ||
		    algt->mode == CIPHER_MODE_OFB)
			return 0;
		else
			return -EINVAL;
	}

	/* XTS data length must be >= chunksize */
	if (algt->mode == CIPHER_MODE_XTS) {
		if (req->cryptlen < crypto_skcipher_chunksize(tfm))
			return -EINVAL;

		/* force use of the unaligned branch */
		ctx->algs_ctx.align_size = ctx->rk_dev->vir_max;

		/* XTS cannot be paused when using hardware crypto */
		if (req->cryptlen > ctx->rk_dev->vir_max)
			return rk_cipher_fallback(req, ctx, encrypt);
	}

	if (is_force_fallback(algt, ctx->keylen))
		return rk_cipher_fallback(req, ctx, encrypt);

	ctx->mode = cipher_algo2bc[algt->algo] |
		    cipher_mode2bc[algt->mode];
	if (!encrypt)
		ctx->mode |= CRYPTO_BC_DECRYPT;

	if (algt->algo == CIPHER_ALGO_AES) {
		uint32_t key_factor;

		/* The key length of XTS is twice the normal length */
		key_factor = algt->mode == CIPHER_MODE_XTS ? 2 : 1;

		if (ctx->keylen == AES_KEYSIZE_128 * key_factor)
			ctx->mode |= CRYPTO_BC_128_bit_key;
		else if (ctx->keylen == AES_KEYSIZE_192 * key_factor)
			ctx->mode |= CRYPTO_BC_192_bit_key;
		else if (ctx->keylen == AES_KEYSIZE_256 * key_factor)
			ctx->mode |= CRYPTO_BC_256_bit_key;
	}

	ctx->iv_len = crypto_skcipher_ivsize(tfm);

	memset(ctx->iv, 0x00, sizeof(ctx->iv));
	memcpy(ctx->iv, req->iv, ctx->iv_len);

	ctx->is_enc = encrypt;

	CRYPTO_MSG("ctx->mode = %x\n", ctx->mode);
	return rk_skcipher_handle_req(ctx->rk_dev, req);
}

static int rk_cipher_encrypt(struct skcipher_request *req)
{
	return rk_cipher_crypt(req, true);
}

static int rk_cipher_decrypt(struct skcipher_request *req)
{
	return rk_cipher_crypt(req, false);
}

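/*
 * Program the block cipher hardware for one request: reset the engine,
 * load the cipher key (split into data/tweak halves for XTS), write the IV
 * for non-ECB modes, then enable the FIFO, the DMA interrupts and the BC
 * control word assembled in ctx->mode.
 */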
static int rk_ablk_hw_init(struct rk_crypto_dev *rk_dev, u32 algo, u32 mode)
{
	struct rk_cipher_ctx *ctx = rk_cipher_ctx_cast(rk_dev);

	rk_cipher_reset(rk_dev);

	CRYPTO_WRITE(rk_dev, CRYPTO_BC_CTL, 0x00010000);

	if (mode == CIPHER_MODE_XTS) {
		uint32_t tmp_len = ctx->keylen / 2;

		write_key_reg(ctx->rk_dev, ctx->key, tmp_len);
		write_tkey_reg(ctx->rk_dev, ctx->key + tmp_len, tmp_len);
	} else {
		write_key_reg(ctx->rk_dev, ctx->key, ctx->keylen);
	}

	if (mode != CIPHER_MODE_ECB)
		set_iv_reg(rk_dev, ctx->iv, ctx->iv_len);

	ctx->mode |= CRYPTO_BC_ENABLE;

	CRYPTO_WRITE(rk_dev, CRYPTO_FIFO_CTL, 0x00030003);

	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0x7f);

	CRYPTO_WRITE(rk_dev, CRYPTO_BC_CTL, ctx->mode | CRYPTO_WRITE_MASK_ALL);

	return 0;
}

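/*
 * Build the LLI descriptor chain for the current chunk and kick the DMA
 * engine.  The head/tail descriptors are tagged with the string/cipher
 * start and last markers; for AEAD an extra AAD descriptor is linked in
 * front of the data chain and the total/AAD lengths are programmed into
 * the channel length registers before CRYPTO_DMA_CTL is started.
 */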
static int crypto_dma_start(struct rk_crypto_dev *rk_dev, uint32_t flag)
{
	struct rk_hw_crypto_v3_info *hw_info =
		(struct rk_hw_crypto_v3_info *)rk_dev->hw_info;
	struct skcipher_request *req =
		skcipher_request_cast(rk_dev->async_req);
	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);
	struct crypto_lli_desc *lli_head, *lli_tail, *lli_aad;
	u32 calc_len = alg_ctx->count;
	u32 start_flag = CRYPTO_DMA_START;
	int ret;

	if (alg_ctx->aligned)
		ret = rk_crypto_hw_desc_init(&hw_info->hw_desc,
					     alg_ctx->sg_src, alg_ctx->sg_dst, alg_ctx->count);
	else
		ret = rk_crypto_hw_desc_init(&hw_info->hw_desc,
					     &alg_ctx->sg_tmp, &alg_ctx->sg_tmp, alg_ctx->count);
	if (ret)
		return ret;

	lli_head = hw_info->hw_desc.lli_head;
	lli_tail = hw_info->hw_desc.lli_tail;
	lli_aad = hw_info->hw_desc.lli_aad;

	/*
	 * When the data length is not aligned, addr_vir is used for the
	 * calculation, so crypto v2 can round the data length up to chunk_size.
	 */
	if (!alg_ctx->is_aead && is_calc_need_round_up(req))
		calc_len = round_up(calc_len, alg_ctx->chunk_size);

	CRYPTO_TRACE("calc_len = %u, cryptlen = %u, assoclen = %u, is_aead = %d",
		     calc_len, alg_ctx->total, alg_ctx->assoclen, alg_ctx->is_aead);

	lli_head->user_define = LLI_USER_STRING_START | LLI_USER_CIPHER_START;

	lli_tail->dma_ctrl = LLI_DMA_CTRL_DST_DONE | LLI_DMA_CTRL_LAST;
	lli_tail->user_define |= LLI_USER_STRING_LAST;
	lli_tail->src_len += (calc_len - alg_ctx->count);
	lli_tail->dst_len += (calc_len - alg_ctx->count);

	if (alg_ctx->is_aead) {
		lli_aad->src_addr = alg_ctx->addr_aad_in;
		lli_aad->src_len = alg_ctx->assoclen;
		lli_aad->user_define = LLI_USER_CIPHER_START |
				       LLI_USER_STRING_START |
				       LLI_USER_STRING_LAST |
				       LLI_USER_STRING_AAD;
		lli_aad->next_addr = hw_info->hw_desc.lli_head_dma;

		/* clear cipher start */
		lli_head->user_define &= (~((u32)LLI_USER_CIPHER_START));

		set_pc_len_reg(rk_dev, alg_ctx->total);
		set_aad_len_reg(rk_dev, alg_ctx->assoclen);
	}

	rk_crypto_dump_hw_desc(&hw_info->hw_desc);

	dma_wmb();

	if (alg_ctx->is_aead)
		CRYPTO_WRITE(rk_dev, CRYPTO_DMA_LLI_ADDR, hw_info->hw_desc.lli_aad_dma);
	else
		CRYPTO_WRITE(rk_dev, CRYPTO_DMA_LLI_ADDR, hw_info->hw_desc.lli_head_dma);

	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_CTL, start_flag | (start_flag << WRITE_MASK));

	return 0;
}

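/*
 * skcipher tfm init: reserve the crypto engine, hook up the cipher ops
 * (start/update/complete/IRQ and the hardware init/DMA/IV helpers) and,
 * when the algorithm is flagged CRYPTO_ALG_NEED_FALLBACK, allocate a
 * software skcipher to fall back on.
 */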
static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
{
	struct rk_crypto_algt *algt = rk_cipher_get_algt(tfm);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct rk_crypto_dev *rk_dev = algt->rk_dev;
	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;

	CRYPTO_TRACE();

	memset(ctx, 0x00, sizeof(*ctx));

	if (!rk_dev->request_crypto)
		return -EFAULT;

	rk_dev->request_crypto(rk_dev, alg_name);

	/* never aligned for the crypto v2 cipher */
	alg_ctx->align_size = 64;
	alg_ctx->chunk_size = crypto_skcipher_chunksize(tfm);

	alg_ctx->ops.start = rk_ablk_start;
	alg_ctx->ops.update = rk_ablk_rx;
	alg_ctx->ops.complete = rk_crypto_complete;
	alg_ctx->ops.irq_handle = rk_crypto_irq_handle;

	alg_ctx->ops.hw_init = rk_ablk_hw_init;
	alg_ctx->ops.hw_dma_start = crypto_dma_start;
	alg_ctx->ops.hw_write_iv = set_iv_reg;

	ctx->rk_dev = rk_dev;

	if (algt->alg.crypto.base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		CRYPTO_MSG("alloc fallback tfm, name = %s", alg_name);
		ctx->fallback_tfm = crypto_alloc_skcipher(alg_name, 0,
							  CRYPTO_ALG_ASYNC |
							  CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fallback_tfm)) {
			CRYPTO_MSG("Could not load fallback driver %s : %ld.\n",
				   alg_name, PTR_ERR(ctx->fallback_tfm));
			ctx->fallback_tfm = NULL;
		}
	}

	return 0;
}

static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));

	CRYPTO_TRACE();

	if (ctx->fallback_tfm) {
		CRYPTO_MSG("free fallback tfm");
		crypto_free_skcipher(ctx->fallback_tfm);
	}

	ctx->rk_dev->release_crypto(ctx->rk_dev, alg_name);
}

static int rk_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct rk_crypto_algt *algt =
		container_of(alg, struct rk_crypto_algt, alg.aead);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
	struct rk_crypto_dev *rk_dev = algt->rk_dev;
	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;

	CRYPTO_TRACE();

	if (!rk_dev->request_crypto)
		return -EFAULT;

	rk_dev->request_crypto(rk_dev, alg_name);

	alg_ctx->align_size = 64;
	alg_ctx->chunk_size = crypto_aead_chunksize(tfm);

	alg_ctx->ops.start = rk_aead_start;
	alg_ctx->ops.update = rk_ablk_rx;
	alg_ctx->ops.complete = rk_crypto_complete;
	alg_ctx->ops.irq_handle = rk_crypto_irq_handle;

	alg_ctx->ops.hw_init = rk_ablk_hw_init;
	alg_ctx->ops.hw_dma_start = crypto_dma_start;
	alg_ctx->ops.hw_write_iv = set_iv_reg;
	alg_ctx->ops.hw_get_result = get_tag_reg;

	ctx->rk_dev = rk_dev;
	alg_ctx->is_aead = 1;

	if (algt->alg.crypto.base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		CRYPTO_MSG("alloc fallback tfm, name = %s", alg_name);
		ctx->fallback_aead =
			crypto_alloc_aead(alg_name, 0,
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fallback_aead)) {
			dev_err(rk_dev->dev,
				"Load fallback driver %s err: %ld.\n",
				alg_name, PTR_ERR(ctx->fallback_aead));
			ctx->fallback_aead = NULL;
			crypto_aead_set_reqsize(tfm, sizeof(struct aead_request));
		} else {
			crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
						crypto_aead_reqsize(ctx->fallback_aead));
		}
	}

	return 0;
}

static void rk_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	CRYPTO_TRACE();

	if (ctx->fallback_aead) {
		CRYPTO_MSG("free fallback tfm");
		crypto_free_aead(ctx->fallback_aead);
	}

	ctx->rk_dev->release_crypto(ctx->rk_dev, crypto_tfm_alg_name(&tfm->base));
}

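/*
 * Common AEAD (GCM) entry point.  Requests with no AAD, no payload, too
 * many scatterlist entries, or unaligned buffers that exceed the AAD/data
 * limits are handed to the software fallback; otherwise the scatterlists
 * are advanced past the associated data and the request is queued on the
 * hardware with the BC control word and IV set up from the context.
 */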
static int rk_aead_crypt(struct aead_request *req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_aead_ctx(tfm);
	struct rk_crypto_algt *algt = rk_aead_get_algt(tfm);
	struct scatterlist *sg_src, *sg_dst;
	struct scatterlist src[2], dst[2];
	u64 data_len;
	bool aligned;
	int ret = -EINVAL;

	CRYPTO_TRACE("%s cryptlen = %u, assoclen = %u",
		     encrypt ? "encrypt" : "decrypt",
		     req->cryptlen, req->assoclen);

	data_len = encrypt ? req->cryptlen : (req->cryptlen - crypto_aead_authsize(tfm));

	if (req->assoclen == 0 ||
	    req->cryptlen == 0 ||
	    data_len == 0 ||
	    is_force_fallback(algt, ctx->keylen))
		return rk_aead_fallback(req, ctx, encrypt);

	/* point sg_src and sg_dst past the assoc data */
	sg_src = scatterwalk_ffwd(src, req->src, req->assoclen);
	sg_dst = (req->src == req->dst) ? sg_src : scatterwalk_ffwd(dst, req->dst, req->assoclen);

	aligned = rk_crypto_check_align(sg_src, sg_nents_for_len(sg_src, data_len),
					sg_dst, sg_nents_for_len(sg_dst, data_len),
					64);

	if (sg_nents_for_len(sg_src, data_len) > RK_DEFAULT_LLI_CNT ||
	    sg_nents_for_len(sg_dst, data_len) > RK_DEFAULT_LLI_CNT)
		return rk_aead_fallback(req, ctx, encrypt);

	if (!aligned) {
		if (req->assoclen > ctx->rk_dev->aad_max ||
		    data_len > ctx->rk_dev->vir_max)
			return rk_aead_fallback(req, ctx, encrypt);
	}

	ctx->mode = cipher_algo2bc[algt->algo] |
		    cipher_mode2bc[algt->mode];
	if (!encrypt)
		ctx->mode |= CRYPTO_BC_DECRYPT;

	if (algt->algo == CIPHER_ALGO_AES) {
		if (ctx->keylen == AES_KEYSIZE_128)
			ctx->mode |= CRYPTO_BC_128_bit_key;
		else if (ctx->keylen == AES_KEYSIZE_192)
			ctx->mode |= CRYPTO_BC_192_bit_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			ctx->mode |= CRYPTO_BC_256_bit_key;
	}

	ctx->iv_len = crypto_aead_ivsize(tfm);

	memset(ctx->iv, 0x00, sizeof(ctx->iv));
	memcpy(ctx->iv, req->iv, ctx->iv_len);

	ctx->is_enc = encrypt;

	CRYPTO_MSG("ctx->mode = %x\n", ctx->mode);
	ret = rk_aead_handle_req(ctx->rk_dev, req);

	return ret;
}

static int rk_aead_encrypt(struct aead_request *req)
{
	return rk_aead_crypt(req, true);
}

static int rk_aead_decrypt(struct aead_request *req)
{
	return rk_aead_crypt(req, false);
}

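/*
 * Algorithm template instances exported to the core driver.  Each entry
 * binds a kernel crypto API name (e.g. "cbc(aes)") to its driver name and
 * the cipher/AEAD mode handled by this file.
 */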
struct rk_crypto_algt rk_v3_ecb_sm4_alg =
	RK_CIPHER_ALGO_INIT(SM4, ECB, ecb(sm4), ecb-sm4-rk);

struct rk_crypto_algt rk_v3_cbc_sm4_alg =
	RK_CIPHER_ALGO_INIT(SM4, CBC, cbc(sm4), cbc-sm4-rk);

struct rk_crypto_algt rk_v3_xts_sm4_alg =
	RK_CIPHER_ALGO_XTS_INIT(SM4, xts(sm4), xts-sm4-rk);

struct rk_crypto_algt rk_v3_cfb_sm4_alg =
	RK_CIPHER_ALGO_INIT(SM4, CFB, cfb(sm4), cfb-sm4-rk);

struct rk_crypto_algt rk_v3_ofb_sm4_alg =
	RK_CIPHER_ALGO_INIT(SM4, OFB, ofb(sm4), ofb-sm4-rk);

struct rk_crypto_algt rk_v3_ctr_sm4_alg =
	RK_CIPHER_ALGO_INIT(SM4, CTR, ctr(sm4), ctr-sm4-rk);

struct rk_crypto_algt rk_v3_gcm_sm4_alg =
	RK_AEAD_ALGO_INIT(SM4, GCM, gcm(sm4), gcm-sm4-rk);

struct rk_crypto_algt rk_v3_ecb_aes_alg =
	RK_CIPHER_ALGO_INIT(AES, ECB, ecb(aes), ecb-aes-rk);

struct rk_crypto_algt rk_v3_cbc_aes_alg =
	RK_CIPHER_ALGO_INIT(AES, CBC, cbc(aes), cbc-aes-rk);

struct rk_crypto_algt rk_v3_xts_aes_alg =
	RK_CIPHER_ALGO_XTS_INIT(AES, xts(aes), xts-aes-rk);

struct rk_crypto_algt rk_v3_cfb_aes_alg =
	RK_CIPHER_ALGO_INIT(AES, CFB, cfb(aes), cfb-aes-rk);

struct rk_crypto_algt rk_v3_ofb_aes_alg =
	RK_CIPHER_ALGO_INIT(AES, OFB, ofb(aes), ofb-aes-rk);

struct rk_crypto_algt rk_v3_ctr_aes_alg =
	RK_CIPHER_ALGO_INIT(AES, CTR, ctr(aes), ctr-aes-rk);

struct rk_crypto_algt rk_v3_gcm_aes_alg =
	RK_AEAD_ALGO_INIT(AES, GCM, gcm(aes), gcm-aes-rk);

struct rk_crypto_algt rk_v3_ecb_des_alg =
	RK_CIPHER_ALGO_INIT(DES, ECB, ecb(des), ecb-des-rk);

struct rk_crypto_algt rk_v3_cbc_des_alg =
	RK_CIPHER_ALGO_INIT(DES, CBC, cbc(des), cbc-des-rk);

struct rk_crypto_algt rk_v3_cfb_des_alg =
	RK_CIPHER_ALGO_INIT(DES, CFB, cfb(des), cfb-des-rk);

struct rk_crypto_algt rk_v3_ofb_des_alg =
	RK_CIPHER_ALGO_INIT(DES, OFB, ofb(des), ofb-des-rk);

struct rk_crypto_algt rk_v3_ecb_des3_ede_alg =
	RK_CIPHER_ALGO_INIT(DES3_EDE, ECB, ecb(des3_ede), ecb-des3_ede-rk);

struct rk_crypto_algt rk_v3_cbc_des3_ede_alg =
	RK_CIPHER_ALGO_INIT(DES3_EDE, CBC, cbc(des3_ede), cbc-des3_ede-rk);

struct rk_crypto_algt rk_v3_cfb_des3_ede_alg =
	RK_CIPHER_ALGO_INIT(DES3_EDE, CFB, cfb(des3_ede), cfb-des3_ede-rk);

struct rk_crypto_algt rk_v3_ofb_des3_ede_alg =
	RK_CIPHER_ALGO_INIT(DES3_EDE, OFB, ofb(des3_ede), ofb-des3_ede-rk);