// SPDX-License-Identifier: GPL-2.0
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers.
 */
#include "rk_crypto_core.h"
#include "rk_crypto_v1.h"
#include "rk_crypto_v1_reg.h"

#define RK_CRYPTO_DEC			BIT(0)

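/* Fetch the per-tfm algorithm context for the request queued on the device. */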
static struct rk_alg_ctx *rk_alg_ctx_cast(struct rk_crypto_dev *rk_dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(rk_dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	return &ctx->algs_ctx;
}

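/*
 * Interrupt callback invoked by the core: acknowledge all pending status
 * bits, then flag a fault if a DMA error bit was set.
 */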
static int rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;

	interrupt_status = CRYPTO_READ(rk_dev, RK_CRYPTO_INTSTS);
	CRYPTO_WRITE(rk_dev, RK_CRYPTO_INTSTS, interrupt_status);

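	/* 0x0a appears to be BCDMA_ERR | HRDMA_ERR in the v1 INTSTS layout */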
	if (interrupt_status & 0x0a) {
		dev_warn(rk_dev->dev, "DMA Error\n");
		rk_dev->err = -EFAULT;
	}

	return 0;
}

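/* Completion callback: hand the finished request back to the crypto API. */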
static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
	if (base->complete)
		base->complete(base, err);
}

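/*
 * Reject requests whose length does not meet the transform's alignment
 * requirement; otherwise enqueue the request on the device.
 */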
static int rk_handle_req(struct rk_crypto_dev *rk_dev,
			 struct skcipher_request *req)
{
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	if (!IS_ALIGNED(req->cryptlen, ctx->algs_ctx.align_size))
		return -EINVAL;

	return rk_dev->enqueue(rk_dev, &req->base);
}

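/*
 * Translate an (algo, mode) pair into the block-cipher control register
 * value expected by the v1 hardware. Returns -EINVAL for unsupported
 * combinations.
 */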
static int rk_get_bc(u32 algo, u32 mode, u32 *bc_val)
{
	/* default DES ECB mode */
	*bc_val = 0;

	switch (algo) {
	case CIPHER_ALGO_DES3_EDE:
		*bc_val |= RK_CRYPTO_TDES_SELECT;
		fallthrough;
	case CIPHER_ALGO_DES:
		/*
		 * Use |= here so the TDES_SELECT bit set for 3DES survives
		 * the fallthrough; plain assignment would clobber it.
		 */
		if (mode == CIPHER_MODE_CBC)
			*bc_val |= RK_CRYPTO_TDES_CHAINMODE_CBC;
		else if (mode != CIPHER_MODE_ECB)
			goto error;
		break;
	case CIPHER_ALGO_AES:
		if (mode == CIPHER_MODE_ECB)
			*bc_val = RK_CRYPTO_AES_ECB_MODE;
		else if (mode == CIPHER_MODE_CBC)
			*bc_val = RK_CRYPTO_AES_CBC_MODE;
		else
			goto error;
		break;
	default:
		goto error;
	}

	return 0;
error:
	return -EINVAL;
}

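/*
 * Validate and cache the key. DES/3DES keys go through the crypto API
 * weak-key checks; AES accepts 128/192/256-bit keys only. The key is
 * copied to the context and written to hardware at request time.
 */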
static int rk_cipher_setkey(struct crypto_skcipher *cipher,
			    const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	struct skcipher_alg *alg = crypto_skcipher_alg(cipher);
	struct rk_crypto_algt *algt;
	int err;

	algt = container_of(alg, struct rk_crypto_algt, alg.crypto);

	CRYPTO_MSG("algo = %x, mode = %x, key_len = %u\n",
		   algt->algo, algt->mode, keylen);

	switch (algt->algo) {
	case CIPHER_ALGO_DES:
		if (keylen != DES_KEY_SIZE)
			goto error;

		err = verify_skcipher_des_key(cipher, key);
		if (err)
			goto error;
		break;
	case CIPHER_ALGO_DES3_EDE:
		err = verify_skcipher_des3_key(cipher, key);
		if (err)
			goto error;
		break;
	case CIPHER_ALGO_AES:
		if (keylen != AES_KEYSIZE_128 &&
		    keylen != AES_KEYSIZE_192 &&
		    keylen != AES_KEYSIZE_256)
			goto error;
		break;
	default:
		goto error;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;

error:
	return -EINVAL;
}

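/*
 * Resolve the block-cipher control bits for this transform and queue the
 * request; the decrypt path below additionally sets RK_CRYPTO_DEC.
 */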
static int rk_cipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_crypto_dev *rk_dev = ctx->rk_dev;
	struct rk_crypto_algt *algt;
	int ret;

	algt = container_of(alg, struct rk_crypto_algt, alg.crypto);

	ret = rk_get_bc(algt->algo, algt->mode, &ctx->mode);
	if (ret)
		return ret;

	CRYPTO_MSG("ctx->mode = %x\n", ctx->mode);

	return rk_handle_req(rk_dev, req);
}

static int rk_cipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_crypto_dev *rk_dev = ctx->rk_dev;
	struct rk_crypto_algt *algt;
	int ret;

	algt = container_of(alg, struct rk_crypto_algt, alg.crypto);

	ret = rk_get_bc(algt->algo, algt->mode, &ctx->mode);
	if (ret)
		return ret;

	ctx->mode |= RK_CRYPTO_DEC;

	CRYPTO_MSG("ctx->mode = %x\n", ctx->mode);

	return rk_handle_req(rk_dev, req);
}

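/*
 * Write key, IV and control bits to the engine. TDES and AES live in
 * separate register banks; RK_CRYPTO_DESSEL in the CONF register steers
 * the data path to the DES block.
 */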
static void rk_ablk_hw_init(struct rk_crypto_dev *rk_dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(rk_dev->async_req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	u32 ivsize, block, conf_reg = 0;

	block = crypto_tfm_alg_blocksize(tfm);
	ivsize = crypto_skcipher_ivsize(cipher);

	if (block == DES_BLOCK_SIZE) {
		memcpy_toio(ctx->rk_dev->reg + RK_CRYPTO_TDES_KEY1_0,
			    ctx->key, ctx->keylen);
		ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			     RK_CRYPTO_TDES_BYTESWAP_KEY |
			     RK_CRYPTO_TDES_BYTESWAP_IV;
		CRYPTO_WRITE(rk_dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
		memcpy_toio(rk_dev->reg + RK_CRYPTO_TDES_IV_0,
			    req->iv, ivsize);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
		memcpy_toio(ctx->rk_dev->reg + RK_CRYPTO_AES_KEY_0,
			    ctx->key, ctx->keylen);
		ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			     RK_CRYPTO_AES_KEY_CHANGE |
			     RK_CRYPTO_AES_BYTESWAP_KEY |
			     RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
			ctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			ctx->mode |= RK_CRYPTO_AES_256BIT_key;
		CRYPTO_WRITE(rk_dev, RK_CRYPTO_AES_CTRL, ctx->mode);
		memcpy_toio(rk_dev->reg + RK_CRYPTO_AES_IV_0,
			    req->iv, ivsize);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
	CRYPTO_WRITE(rk_dev, RK_CRYPTO_CONF, conf_reg);
	CRYPTO_WRITE(rk_dev, RK_CRYPTO_INTENA,
		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}

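/*
 * Kick one DMA transfer: source address, length in 32-bit words, then
 * destination address. The high half of the CTRL write looks like the
 * usual Rockchip write-enable mask for the BLOCK_START bit.
 */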
static void crypto_dma_start(struct rk_crypto_dev *rk_dev)
{
	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev);

	CRYPTO_WRITE(rk_dev, RK_CRYPTO_BRDMAS, alg_ctx->addr_in);
	CRYPTO_WRITE(rk_dev, RK_CRYPTO_BRDMAL, alg_ctx->count / 4);
	CRYPTO_WRITE(rk_dev, RK_CRYPTO_BTDMAS, alg_ctx->addr_out);
	CRYPTO_WRITE(rk_dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
		     _SBF(RK_CRYPTO_BLOCK_START, 16));
}

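/*
 * Prepare the next scatterlist chunk for DMA. For decryption, save the
 * chunk's last ciphertext block (the IV for the next chunk) before it can
 * be overwritten, and store the request's final ciphertext block in
 * req->iv as the chaining IV for a follow-up request.
 */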
static int rk_set_data_start(struct rk_crypto_dev *rk_dev)
{
	int err;
	struct skcipher_request *req =
		skcipher_request_cast(rk_dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev);
	u32 ivsize = crypto_skcipher_ivsize(tfm);
	u8 *src_last_blk = page_address(sg_page(alg_ctx->sg_src)) +
			   alg_ctx->sg_src->offset +
			   alg_ctx->sg_src->length - ivsize;

	/*
	 * Store the IV that needs to be updated in chain mode and update
	 * the IV buffer to contain the next IV for decryption mode.
	 */
	if (ctx->mode & RK_CRYPTO_DEC) {
		memcpy(ctx->iv, src_last_blk, ivsize);
		sg_pcopy_to_buffer(alg_ctx->req_src, alg_ctx->src_nents,
				   req->iv, ivsize,
				   alg_ctx->total - ivsize);
	}

	err = rk_dev->load_data(rk_dev, alg_ctx->sg_src, alg_ctx->sg_dst);
	if (!err)
		crypto_dma_start(rk_dev);
	return err;
}

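/*
 * Entry point called by the core once a request is dequeued: capture the
 * scatterlists and lengths, program the hardware and start the first DMA.
 */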
static int rk_ablk_start(struct rk_crypto_dev *rk_dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(rk_dev->async_req);
	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev);
	int err = 0;

	alg_ctx->left_bytes = req->cryptlen;
	alg_ctx->total = req->cryptlen;
	alg_ctx->sg_src = req->src;
	alg_ctx->req_src = req->src;
	alg_ctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	alg_ctx->sg_dst = req->dst;
	alg_ctx->req_dst = req->dst;
	alg_ctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);

	rk_ablk_hw_init(rk_dev);
	err = rk_set_data_start(rk_dev);

	return err;
}

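/*
 * After the final chunk of an encryption, copy the last ciphertext block
 * back into req->iv so the caller can chain a follow-up request.
 */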
static void rk_iv_copyback(struct rk_crypto_dev *rk_dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(rk_dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev);
	u32 ivsize = crypto_skcipher_ivsize(tfm);

	/* Update the IV buffer to contain the next IV for encryption mode. */
	if (!(ctx->mode & RK_CRYPTO_DEC) && req->iv) {
		if (alg_ctx->aligned) {
			memcpy(req->iv, sg_virt(alg_ctx->sg_dst) +
			       alg_ctx->sg_dst->length - ivsize, ivsize);
		} else {
			memcpy(req->iv, rk_dev->addr_vir +
			       alg_ctx->count - ivsize, ivsize);
		}
	}
}

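/*
 * Between chunks, reload the hardware IV register: for decryption use the
 * ciphertext block saved in rk_set_data_start(), for encryption use the
 * last ciphertext block just produced in the destination scatterlist.
 */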
static void rk_update_iv(struct rk_crypto_dev *rk_dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(rk_dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev);
	u32 ivsize = crypto_skcipher_ivsize(tfm);
	u8 *new_iv = NULL;

	if (ctx->mode & RK_CRYPTO_DEC) {
		new_iv = ctx->iv;
	} else {
		new_iv = page_address(sg_page(alg_ctx->sg_dst)) +
			 alg_ctx->sg_dst->offset +
			 alg_ctx->sg_dst->length - ivsize;
	}

	if (ivsize == DES_BLOCK_SIZE)
		memcpy_toio(rk_dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
	else if (ivsize == AES_BLOCK_SIZE)
		memcpy_toio(rk_dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
}

/*
 * Returns 0 when the current chunk completed and processing can continue,
 * or a negative errno when an error occurred.
 */
static int rk_ablk_rx(struct rk_crypto_dev *rk_dev)
{
	int err = 0;
	struct rk_alg_ctx *alg_ctx = rk_alg_ctx_cast(rk_dev);

	CRYPTO_TRACE("left_bytes = %u\n", alg_ctx->left_bytes);

	err = rk_dev->unload_data(rk_dev);
	if (err)
		goto out_rx;

	if (alg_ctx->left_bytes) {
		rk_update_iv(rk_dev);
		if (alg_ctx->aligned) {
			if (sg_is_last(alg_ctx->sg_src)) {
				dev_err(rk_dev->dev, "[%s:%d] Lack of data\n",
					__func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			alg_ctx->sg_src = sg_next(alg_ctx->sg_src);
			alg_ctx->sg_dst = sg_next(alg_ctx->sg_dst);
		}
		err = rk_set_data_start(rk_dev);
	} else {
		rk_iv_copyback(rk_dev);
	}
out_rx:
	return err;
}

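/*
 * One-time transform setup: claim the crypto engine, derive the alignment
 * requirement from the alignmask and wire up the per-request callbacks
 * used by the core state machine.
 */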
static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;
	struct rk_crypto_algt *algt;
	struct rk_crypto_dev *rk_dev;
	const char *alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));

	algt = container_of(alg, struct rk_crypto_algt, alg.crypto);
	rk_dev = algt->rk_dev;

	memset(ctx, 0x00, sizeof(*ctx));

	if (!rk_dev->request_crypto)
		return -EFAULT;

	rk_dev->request_crypto(rk_dev, alg_name);

	alg_ctx->align_size = crypto_skcipher_alignmask(tfm) + 1;

	alg_ctx->ops.start = rk_ablk_start;
	alg_ctx->ops.update = rk_ablk_rx;
	alg_ctx->ops.complete = rk_crypto_complete;
	alg_ctx->ops.irq_handle = rk_crypto_irq_handle;

	ctx->rk_dev = rk_dev;

	return 0;
}

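/* Release the engine reference taken in rk_ablk_init_tfm(). */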
static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));

	ctx->rk_dev->release_crypto(ctx->rk_dev, alg_name);
}

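/*
 * Algorithm templates picked up by the v1 core, which registers them with
 * the crypto API.
 */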
struct rk_crypto_algt rk_v1_ecb_aes_alg =
	RK_CIPHER_ALGO_INIT(AES, ECB, ecb(aes), ecb-aes-rk);

struct rk_crypto_algt rk_v1_cbc_aes_alg =
	RK_CIPHER_ALGO_INIT(AES, CBC, cbc(aes), cbc-aes-rk);

struct rk_crypto_algt rk_v1_ecb_des_alg =
	RK_CIPHER_ALGO_INIT(DES, ECB, ecb(des), ecb-des-rk);

struct rk_crypto_algt rk_v1_cbc_des_alg =
	RK_CIPHER_ALGO_INIT(DES, CBC, cbc(des), cbc-des-rk);

struct rk_crypto_algt rk_v1_ecb_des3_ede_alg =
	RK_CIPHER_ALGO_INIT(DES3_EDE, ECB, ecb(des3_ede), ecb-des3_ede-rk);

struct rk_crypto_algt rk_v1_cbc_des3_ede_alg =
	RK_CIPHER_ALGO_INIT(DES3_EDE, CBC, cbc(des3_ede), cbc-des3_ede-rk);