xref: /OK3568_Linux_fs/kernel/drivers/crypto/rockchip/cryptodev_linux/rk_cryptodev.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Crypto acceleration support for Rockchip crypto
4  *
5  * Copyright (c) 2021, Rockchip Electronics Co., Ltd
6  *
7  * Author: Lin Jinhan <troy.lin@rock-chips.com>
8  *
9  */
10 #include <crypto/internal/akcipher.h>
11 #include <crypto/internal/rsa.h>
12 #include <linux/kernel.h>
13 #include <linux/scatterlist.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/sysctl.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dma-direct.h>
18 #include <linux/dma-buf.h>
19 #include <linux/list.h>
20 
21 #include "version.h"
22 #include "cipherapi.h"
23 #include "rk_cryptodev.h"
24 
25 #define MAX_CRYPTO_DEV		1
26 #define MAX_CRYPTO_NAME_LEN	64
27 
28 struct dma_fd_map_node {
29 	struct kernel_crypt_fd_map_op fd_map;
30 	struct sg_table *sgtbl;
31 	struct dma_buf *dmabuf;
32 	struct dma_buf_attachment *dma_attach;
33 	struct list_head	list;
34 };
35 
36 struct crypto_dev_info {
37 	struct device *dev;
38 	char name[MAX_CRYPTO_NAME_LEN];
39 	bool is_multi_thread;
40 };
41 
42 static struct crypto_dev_info g_dev_infos[MAX_CRYPTO_DEV];
43 
44 /*
45  * rk_cryptodev_register_dev - register crypto device into rk_cryptodev.
46  * @dev:	[in]	crypto device to register
47  * @name:	[in]	crypto device name to register
48  */
49 int rk_cryptodev_register_dev(struct device *dev, const char *name)
50 {
51 	uint32_t i;
52 
53 	if (WARN_ON(!dev))
54 		return -EINVAL;
55 
56 	if (WARN_ON(!name))
57 		return -EINVAL;
58 
59 	for (i = 0; i < ARRAY_SIZE(g_dev_infos); i++) {
60 		if (!g_dev_infos[i].dev) {
61 			memset(&g_dev_infos[i], 0x00, sizeof(g_dev_infos[i]));
62 
63 			g_dev_infos[i].dev = dev;
64 			strncpy(g_dev_infos[i].name, name, sizeof(g_dev_infos[i].name) - 1);
65 
66 			g_dev_infos[i].is_multi_thread = strstr(g_dev_infos[i].name, "multi");
67 			dev_info(dev, "register to cryptodev ok!\n");
68 			return 0;
69 		}
70 	}
71 
72 	return -ENOMEM;
73 }
74 EXPORT_SYMBOL_GPL(rk_cryptodev_register_dev);
75 
76 /*
77  * rk_cryptodev_unregister_dev - unregister crypto device from rk_cryptodev
78  * @dev:	[in]	crypto device to unregister
79  */
80 int rk_cryptodev_unregister_dev(struct device *dev)
81 {
82 	uint32_t i;
83 
84 	if (WARN_ON(!dev))
85 		return -EINVAL;
86 
87 	for (i = 0; i < ARRAY_SIZE(g_dev_infos); i++) {
88 		if (g_dev_infos[i].dev == dev) {
89 			memset(&g_dev_infos[i], 0x00, sizeof(g_dev_infos[i]));
90 			return 0;
91 		}
92 	}
93 
94 	return -EINVAL;
95 }
96 EXPORT_SYMBOL_GPL(rk_cryptodev_unregister_dev);
97 
98 static struct device *rk_cryptodev_find_dev(const char *name)
99 {
100 	uint32_t i;
101 
102 	for (i = 0; i < ARRAY_SIZE(g_dev_infos); i++) {
103 		if (g_dev_infos[i].dev)
104 			return g_dev_infos[i].dev;
105 	}
106 
107 	return NULL;
108 }
109 
110 /* this function has to be called from process context */
111 static int fill_kcop_fd_from_cop(struct kernel_crypt_fd_op *kcop, struct fcrypt *fcr)
112 {
113 	struct crypt_fd_op *cop = &kcop->cop;
114 	struct csession *ses_ptr;
115 	int rc;
116 
117 	/* this also enters ses_ptr->sem */
118 	ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
119 	if (unlikely(!ses_ptr)) {
120 		derr(1, "invalid session ID=0x%08X", cop->ses);
121 		return -EINVAL;
122 	}
123 	kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
124 	kcop->digestsize = 0; /* will be updated during operation */
125 
126 	crypto_put_session(ses_ptr);
127 
128 	kcop->task = current;
129 	kcop->mm = current->mm;
130 
131 	if (cop->iv) {
132 		rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
133 		if (unlikely(rc)) {
134 			derr(1, "error copying IV (%d bytes), returned %d for addr %p",
135 			     kcop->ivlen, rc, cop->iv);
136 			return -EFAULT;
137 		}
138 	}
139 
140 	return 0;
141 }
142 
143 
144 /* this function has to be called from process context */
145 static int fill_cop_fd_from_kcop(struct kernel_crypt_fd_op *kcop, struct fcrypt *fcr)
146 {
147 	int ret;
148 
149 	if (kcop->digestsize) {
150 		ret = copy_to_user(kcop->cop.mac,
151 				  kcop->hash_output, kcop->digestsize);
152 		if (unlikely(ret))
153 			return -EFAULT;
154 	}
155 	if (kcop->ivlen && kcop->cop.flags & COP_FLAG_WRITE_IV) {
156 		ret = copy_to_user(kcop->cop.iv,
157 				   kcop->iv, kcop->ivlen);
158 		if (unlikely(ret))
159 			return -EFAULT;
160 	}
161 	return 0;
162 }
163 
164 static int kcop_fd_from_user(struct kernel_crypt_fd_op *kcop,
165 			struct fcrypt *fcr, void __user *arg)
166 {
167 	if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop))))
168 		return -EFAULT;
169 
170 	return fill_kcop_fd_from_cop(kcop, fcr);
171 }
172 
173 static int kcop_fd_to_user(struct kernel_crypt_fd_op *kcop,
174 			   struct fcrypt *fcr, void __user *arg)
175 {
176 	int ret;
177 
178 	ret = fill_cop_fd_from_kcop(kcop, fcr);
179 	if (unlikely(ret)) {
180 		derr(1, "Error in fill_cop_from_kcop");
181 		return ret;
182 	}
183 
184 	if (unlikely(copy_to_user(arg, &kcop->cop, sizeof(kcop->cop)))) {
185 		derr(1, "Cannot copy to userspace");
186 		return -EFAULT;
187 	}
188 
189 	return 0;
190 }
191 
192 static int
193 hash_n_crypt_fd(struct csession *ses_ptr, struct crypt_fd_op *cop,
194 		struct scatterlist *src_sg, struct scatterlist *dst_sg,
195 		uint32_t len)
196 {
197 	int ret;
198 
199 	/* Always hash before encryption and after decryption. Maybe
200 	 * we should introduce a flag to switch... TBD later on.
201 	 */
202 	if (cop->op == COP_ENCRYPT) {
203 		if (ses_ptr->hdata.init != 0) {
204 			ret = cryptodev_hash_update(&ses_ptr->hdata,
205 						    src_sg, len);
206 			if (unlikely(ret))
207 				goto out_err;
208 		}
209 		if (ses_ptr->cdata.init != 0) {
210 			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
211 						       src_sg, dst_sg, len);
212 
213 			if (unlikely(ret))
214 				goto out_err;
215 		}
216 	} else {
217 		if (ses_ptr->cdata.init != 0) {
218 			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
219 						       src_sg, dst_sg, len);
220 
221 			if (unlikely(ret))
222 				goto out_err;
223 		}
224 
225 		if (ses_ptr->hdata.init != 0) {
226 			ret = cryptodev_hash_update(&ses_ptr->hdata,
227 						    dst_sg, len);
228 			if (unlikely(ret))
229 				goto out_err;
230 		}
231 	}
232 	return 0;
233 out_err:
234 	derr(0, "CryptoAPI failure: %d", ret);
235 	return ret;
236 }
237 
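/*
 * get_dmafd_sgtbl - attach the dma-buf referenced by @dma_fd to the crypto
 * device and map it into a scatter-gather table, with the cache maintenance
 * implied by @dir. On failure, everything acquired so far is released again.
 */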
238 static int get_dmafd_sgtbl(int dma_fd, unsigned int dma_len, enum dma_data_direction dir,
239 			   struct sg_table **sg_tbl, struct dma_buf_attachment **dma_attach,
240 			   struct dma_buf **dmabuf)
241 {
242 	struct device *crypto_dev = rk_cryptodev_find_dev(NULL);
243 
244 	if (!crypto_dev)
245 		return -EINVAL;
246 
247 	*sg_tbl     = NULL;
248 	*dmabuf     = NULL;
249 	*dma_attach = NULL;
250 
251 	*dmabuf = dma_buf_get(dma_fd);
252 	if (IS_ERR(*dmabuf)) {
253 		derr(1, "dmabuf error! ret = %d", (int)PTR_ERR(*dmabuf));
254 		*dmabuf = NULL;
255 		goto error;
256 	}
257 
258 	*dma_attach = dma_buf_attach(*dmabuf, crypto_dev);
259 	if (IS_ERR(*dma_attach)) {
260 		derr(1, "dma_attach error! ret = %d", (int)PTR_ERR(*dma_attach));
261 		*dma_attach = NULL;
262 		goto error;
263 	}
264 
265 	/*
266 	 * DMA_TO_DEVICE  : cache clean for input data
267 	 * DMA_FROM_DEVICE: cache invalidate for output data
268 	 */
269 	*sg_tbl = dma_buf_map_attachment(*dma_attach, dir);
270 	if (IS_ERR(*sg_tbl)) {
271 		derr(1, "sg_tbl error! ret = %d", (int)PTR_ERR(*sg_tbl));
272 		*sg_tbl = NULL;
273 		goto error;
274 	}
275 
276 	/* cache invalidate for input data */
277 	if (dir == DMA_TO_DEVICE)
278 		dma_sync_sg_for_cpu(crypto_dev, (*sg_tbl)->sgl, (*sg_tbl)->nents, DMA_FROM_DEVICE);
279 
280 	return 0;
281 error:
282 	if (*sg_tbl)
283 		dma_buf_unmap_attachment(*dma_attach, *sg_tbl, dir);
284 
285 	if (*dma_attach)
286 		dma_buf_detach(*dmabuf, *dma_attach);
287 
288 	if (*dmabuf)
289 		dma_buf_put(*dmabuf);
290 
291 	return -EINVAL;
292 }
293 
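/*
 * put_dmafd_sgtbl - release a mapping obtained from get_dmafd_sgtbl(): flush
 * caches for an output buffer, unmap the scatter-gather table, then detach
 * and drop the dma-buf reference.
 */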
294 static int put_dmafd_sgtbl(int dma_fd, enum dma_data_direction dir,
295 			   struct sg_table *sg_tbl, struct dma_buf_attachment *dma_attach,
296 			   struct dma_buf *dmabuf)
297 {
298 	struct device *crypto_dev = rk_cryptodev_find_dev(NULL);
299 
300 	if (!crypto_dev)
301 		return -EINVAL;
302 
303 	if (!sg_tbl || !dma_attach || !dmabuf)
304 		return -EINVAL;
305 
306 	/* cache clean for output data */
307 	if (dir == DMA_FROM_DEVICE)
308 		dma_sync_sg_for_device(crypto_dev, sg_tbl->sgl, sg_tbl->nents, DMA_TO_DEVICE);
309 
310 	/*
311 	 * DMA_TO_DEVICE  : do nothing for input data
312 	 * DMA_FROM_DEVICE: cache invalidate for output data
313 	 */
314 	dma_buf_unmap_attachment(dma_attach, sg_tbl, dir);
315 	dma_buf_detach(dmabuf, dma_attach);
316 	dma_buf_put(dmabuf);
317 
318 	return 0;
319 }
320 
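/*
 * dma_fd_find_node - look up an existing mapping for @dma_fd in the
 * fcr->dma_map_list populated by the map ioctl; returns NULL if the fd has
 * not been mapped that way.
 */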
321 static struct dma_fd_map_node *dma_fd_find_node(struct fcrypt *fcr, int dma_fd)
322 {
323 	struct dma_fd_map_node *map_node = NULL;
324 
325 	mutex_lock(&fcr->sem);
326 
327 	list_for_each_entry(map_node, &fcr->dma_map_list, list) {
328 		if (unlikely(map_node->fd_map.mop.dma_fd == dma_fd)) {
329 			mutex_unlock(&fcr->sem);
330 			return map_node;
331 		}
332 	}
333 
334 	mutex_unlock(&fcr->sem);
335 
336 	return NULL;
337 }
338 
339 /* This is the main crypto function - zero-copy edition */
340 static int __crypto_fd_run(struct fcrypt *fcr, struct csession *ses_ptr,
341 			   struct kernel_crypt_fd_op *kcop)
342 {
343 	struct crypt_fd_op *cop = &kcop->cop;
344 	struct dma_buf *dma_buf_in = NULL, *dma_buf_out = NULL;
345 	struct sg_table sg_tmp;
346 	struct sg_table *sg_tbl_in = NULL, *sg_tbl_out = NULL;
347 	struct dma_buf_attachment *dma_attach_in = NULL, *dma_attach_out = NULL;
348 	struct dma_fd_map_node *node_src = NULL, *node_dst = NULL;
349 	int ret = 0;
350 
351 	node_src = dma_fd_find_node(fcr, kcop->cop.src_fd);
352 	if (node_src) {
353 		sg_tbl_in = node_src->sgtbl;
354 	} else {
355 		ret = get_dmafd_sgtbl(kcop->cop.src_fd, kcop->cop.len, DMA_TO_DEVICE,
356 				&sg_tbl_in, &dma_attach_in, &dma_buf_in);
357 		if (unlikely(ret)) {
358 			derr(1, "Error get_dmafd_sgtbl src.");
359 			goto exit;
360 		}
361 	}
362 
363 	/* only cipher has dst */
364 	if (ses_ptr->cdata.init) {
365 		node_dst = dma_fd_find_node(fcr, kcop->cop.dst_fd);
366 		if (node_dst) {
367 			sg_tbl_out = node_dst->sgtbl;
368 		} else {
369 			ret = get_dmafd_sgtbl(kcop->cop.dst_fd, kcop->cop.len, DMA_FROM_DEVICE,
370 				&sg_tbl_out, &dma_attach_out, &dma_buf_out);
371 			if (unlikely(ret)) {
372 				derr(1, "Error get_dmafd_sgtbl dst.");
373 				goto exit;
374 			}
375 		}
376 	} else {
377 		memset(&sg_tmp, 0x00, sizeof(sg_tmp));
378 		sg_tbl_out = &sg_tmp;
379 	}
380 
381 	ret = hash_n_crypt_fd(ses_ptr, cop, sg_tbl_in->sgl, sg_tbl_out->sgl, cop->len);
382 
383 exit:
384 	if (dma_buf_in)
385 		put_dmafd_sgtbl(kcop->cop.src_fd, DMA_TO_DEVICE,
386 				sg_tbl_in, dma_attach_in, dma_buf_in);
387 
388 	if (dma_buf_out)
389 		put_dmafd_sgtbl(kcop->cop.dst_fd, DMA_FROM_DEVICE,
390 				sg_tbl_out, dma_attach_out, dma_buf_out);
391 	return ret;
392 }
393 
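/*
 * crypto_fd_run - top half of the dma-fd cipher/hash ioctl: validate the
 * operation, reset the hash state if requested, program the IV, run
 * __crypto_fd_run() over the dma-buf backed buffers, then read back the IV
 * and the final digest as needed.
 */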
394 static int crypto_fd_run(struct fcrypt *fcr, struct kernel_crypt_fd_op *kcop)
395 {
396 	struct csession *ses_ptr;
397 	struct crypt_fd_op *cop = &kcop->cop;
398 	int ret = -EINVAL;
399 
400 	if (unlikely(cop->op != COP_ENCRYPT && cop->op != COP_DECRYPT)) {
401 		ddebug(1, "invalid operation op=%u", cop->op);
402 		return -EINVAL;
403 	}
404 
405 	/* this also enters ses_ptr->sem */
406 	ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
407 	if (unlikely(!ses_ptr)) {
408 		derr(1, "invalid session ID=0x%08X", cop->ses);
409 		return -EINVAL;
410 	}
411 
412 	if (ses_ptr->hdata.init != 0 && (cop->flags == 0 || cop->flags & COP_FLAG_RESET)) {
413 		ret = cryptodev_hash_reset(&ses_ptr->hdata);
414 		if (unlikely(ret)) {
415 			derr(1, "error in cryptodev_hash_reset()");
416 			goto out_unlock;
417 		}
418 	}
419 
420 	if (ses_ptr->cdata.init != 0) {
421 		int blocksize = ses_ptr->cdata.blocksize;
422 
423 		if (unlikely(cop->len % blocksize)) {
424 			derr(1, "data size (%u) isn't a multiple of block size (%u)",
425 				cop->len, blocksize);
426 			ret = -EINVAL;
427 			goto out_unlock;
428 		}
429 
430 		cryptodev_cipher_set_iv(&ses_ptr->cdata, kcop->iv,
431 					min(ses_ptr->cdata.ivsize, kcop->ivlen));
432 	}
433 
434 	if (likely(cop->len)) {
435 		ret = __crypto_fd_run(fcr, ses_ptr, kcop);
436 		if (unlikely(ret))
437 			goto out_unlock;
438 	}
439 
440 	if (ses_ptr->cdata.init != 0) {
441 		cryptodev_cipher_get_iv(&ses_ptr->cdata, kcop->iv,
442 					min(ses_ptr->cdata.ivsize, kcop->ivlen));
443 	}
444 
445 	if (ses_ptr->hdata.init != 0 &&
446 		((cop->flags & COP_FLAG_FINAL) ||
447 		 (!(cop->flags & COP_FLAG_UPDATE) || cop->len == 0))) {
448 
449 		ret = cryptodev_hash_final(&ses_ptr->hdata, kcop->hash_output);
450 		if (unlikely(ret)) {
451 			derr(0, "CryptoAPI failure: %d", ret);
452 			goto out_unlock;
453 		}
454 		kcop->digestsize = ses_ptr->hdata.digestsize;
455 	}
456 
457 out_unlock:
458 	crypto_put_session(ses_ptr);
459 
460 	return ret;
461 }
462 
463 static int kcop_map_fd_from_user(struct kernel_crypt_fd_map_op *kcop,
464 			struct fcrypt *fcr, void __user *arg)
465 {
466 	if (unlikely(copy_from_user(&kcop->mop, arg, sizeof(kcop->mop))))
467 		return -EFAULT;
468 
469 	return 0;
470 }
471 
472 static int kcop_map_fd_to_user(struct kernel_crypt_fd_map_op *kcop,
473 			   struct fcrypt *fcr, void __user *arg)
474 {
475 	if (unlikely(copy_to_user(arg, &kcop->mop, sizeof(kcop->mop)))) {
476 		derr(1, "Cannot copy to userspace");
477 		return -EFAULT;
478 	}
479 
480 	return 0;
481 }
482 
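/*
 * dma_fd_map_for_user - handle RIOCCRYPT_FD_MAP: map the dma-buf fd for the
 * crypto device, record the mapping in fcr->dma_map_list and return the DMA
 * address of the first segment to user space. Mapping an already mapped fd
 * simply returns the address stored in the existing node.
 */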
483 static int dma_fd_map_for_user(struct fcrypt *fcr, struct kernel_crypt_fd_map_op *kmop)
484 {
485 	struct device *crypto_dev = NULL;
486 	struct dma_fd_map_node *map_node = NULL;
487 
488 	/* check if dma_fd is already mapped */
489 	map_node = dma_fd_find_node(fcr, kmop->mop.dma_fd);
490 	if (map_node) {
491 		kmop->mop.phys_addr = map_node->fd_map.mop.phys_addr;
492 		return 0;
493 	}
494 
495 	crypto_dev = rk_cryptodev_find_dev(NULL);
496 	if (!crypto_dev)
497 		return -EINVAL;
498 
499 	map_node = kzalloc(sizeof(*map_node), GFP_KERNEL);
500 	if (!map_node)
501 		return -ENOMEM;
502 
503 	map_node->dmabuf = dma_buf_get(kmop->mop.dma_fd);
504 	if (IS_ERR(map_node->dmabuf)) {
505 		derr(1, "dmabuf error! ret = %d", (int)PTR_ERR(map_node->dmabuf));
506 		map_node->dmabuf = NULL;
507 		goto error;
508 	}
509 
510 	map_node->dma_attach = dma_buf_attach(map_node->dmabuf, crypto_dev);
511 	if (IS_ERR(map_node->dma_attach)) {
512 		derr(1, "dma_attach error! ret = %d", (int)PTR_ERR(map_node->dma_attach));
513 		map_node->dma_attach = NULL;
514 		goto error;
515 	}
516 
517 	map_node->sgtbl = dma_buf_map_attachment(map_node->dma_attach, DMA_BIDIRECTIONAL);
518 	if (IS_ERR(map_node->sgtbl)) {
519 		derr(1, "sg_tbl error! ret = %d", (int)PTR_ERR(map_node->sgtbl));
520 		map_node->sgtbl = NULL;
521 		goto error;
522 	}
523 
524 	map_node->fd_map.mop.dma_fd    = kmop->mop.dma_fd;
525 	map_node->fd_map.mop.phys_addr = map_node->sgtbl->sgl->dma_address;
526 
527 	mutex_lock(&fcr->sem);
528 	list_add(&map_node->list, &fcr->dma_map_list);
529 	mutex_unlock(&fcr->sem);
530 
531 	kmop->mop.phys_addr = map_node->fd_map.mop.phys_addr;
532 
533 	return 0;
534 error:
535 	if (map_node->sgtbl)
536 		dma_buf_unmap_attachment(map_node->dma_attach, map_node->sgtbl, DMA_BIDIRECTIONAL);
537 
538 	if (map_node->dma_attach)
539 		dma_buf_detach(map_node->dmabuf, map_node->dma_attach);
540 
541 	if (map_node->dmabuf)
542 		dma_buf_put(map_node->dmabuf);
543 
544 	kfree(map_node);
545 
546 	return -EINVAL;
547 }
548 
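/*
 * dma_fd_unmap_for_user - handle RIOCCRYPT_FD_UNMAP: find the node matching
 * both the dma-buf fd and the address returned at map time, release its
 * dma-buf resources and drop it from fcr->dma_map_list.
 */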
549 static int dma_fd_unmap_for_user(struct fcrypt *fcr, struct kernel_crypt_fd_map_op *kmop)
550 {
551 	struct dma_fd_map_node *tmp, *map_node;
552 	bool is_found = false;
553 	int ret = 0;
554 
555 	mutex_lock(&fcr->sem);
556 	list_for_each_entry_safe(map_node, tmp, &fcr->dma_map_list, list) {
557 		if (map_node->fd_map.mop.dma_fd == kmop->mop.dma_fd &&
558 		    map_node->fd_map.mop.phys_addr == kmop->mop.phys_addr) {
559 			dma_buf_unmap_attachment(map_node->dma_attach, map_node->sgtbl,
560 						 DMA_BIDIRECTIONAL);
561 			dma_buf_detach(map_node->dmabuf, map_node->dma_attach);
562 			dma_buf_put(map_node->dmabuf);
563 			list_del(&map_node->list);
564 			kfree(map_node);
565 			kmop->mop.phys_addr = 0;
566 			is_found = true;
567 			break;
568 		}
569 	}
570 
571 	if (unlikely(!is_found)) {
572 		derr(1, "dmafd =0x%08X not found!", kmop->mop.dma_fd);
573 		ret = -ENOENT;
574 		mutex_unlock(&fcr->sem);
575 		goto exit;
576 	}
577 
578 	mutex_unlock(&fcr->sem);
579 
580 exit:
581 	return ret;
582 }
583 
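/*
 * RIOCCRYPT_CPU_ACCESS / RIOCCRYPT_DEV_ACCESS handlers: bracket CPU access to
 * a previously mapped dma-buf with dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() so caches stay coherent with the crypto engine.
 */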
584 static int dma_fd_begin_cpu_access(struct fcrypt *fcr, struct kernel_crypt_fd_map_op *kmop)
585 {
586 	struct dma_fd_map_node *map_node = NULL;
587 
588 	map_node = dma_fd_find_node(fcr, kmop->mop.dma_fd);
589 	if (unlikely(!map_node)) {
590 		derr(1, "dmafd =0x%08X not found!", kmop->mop.dma_fd);
591 		return -ENOENT;
592 	}
593 
594 	return dma_buf_begin_cpu_access(map_node->dmabuf, DMA_BIDIRECTIONAL);
595 }
596 
597 static int dma_fd_end_cpu_access(struct fcrypt *fcr, struct kernel_crypt_fd_map_op *kmop)
598 {
599 	struct dma_fd_map_node *map_node = NULL;
600 
601 	map_node = dma_fd_find_node(fcr, kmop->mop.dma_fd);
602 	if (unlikely(!map_node)) {
603 		derr(1, "dmafd =0x%08X not found!", kmop->mop.dma_fd);
604 		return -ENOENT;
605 	}
606 
607 	return dma_buf_end_cpu_access(map_node->dmabuf, DMA_BIDIRECTIONAL);
608 }
609 
610 static int kcop_rsa_from_user(struct kernel_crypt_rsa_op *kcop,
611 			struct fcrypt *fcr, void __user *arg)
612 {
613 	if (unlikely(copy_from_user(&kcop->rop, arg, sizeof(kcop->rop))))
614 		return -EFAULT;
615 
616 	return 0;
617 }
618 
619 static int kcop_rsa_to_user(struct kernel_crypt_rsa_op *kcop,
620 			   struct fcrypt *fcr, void __user *arg)
621 {
622 	if (unlikely(copy_to_user(arg, &kcop->rop, sizeof(kcop->rop)))) {
623 		derr(1, "Cannot copy to userspace");
624 		return -EFAULT;
625 	}
626 
627 	return 0;
628 }
629 
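/*
 * crypto_rsa_run - handle RIOCCRYPT_RSA_CRYPT through the "rsa-rk" akcipher
 * driver: the BER-encoded key and the input/output data are bounced through
 * kernel buffers with copy_from_user()/copy_to_user().
 */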
630 static int crypto_rsa_run(struct fcrypt *fcr, struct kernel_crypt_rsa_op *krop)
631 {
632 	int ret;
633 	u8 *key = NULL, *in = NULL, *out = NULL;
634 	u32 out_len_max;
635 	struct crypt_rsa_op *rop = &krop->rop;
636 	const char *driver = "rsa-rk";
637 	struct crypto_akcipher *tfm = NULL;
638 	struct akcipher_request *req = NULL;
639 	struct crypto_wait wait;
640 	struct scatterlist src, dst;
641 	bool is_priv_key = (rop->flags & COP_FLAG_RSA_PRIV) == COP_FLAG_RSA_PRIV;
642 
643 	/* The key size cannot exceed RK_RSA_BER_KEY_MAX bytes */
644 	if (rop->key_len > RK_RSA_BER_KEY_MAX)
645 		return -ENOKEY;
646 
647 	if (rop->in_len  > RK_RSA_KEY_MAX_BYTES ||
648 	    rop->out_len > RK_RSA_KEY_MAX_BYTES)
649 		return -EINVAL;
650 
651 	tfm = crypto_alloc_akcipher(driver, 0, 0);
652 	if (IS_ERR(tfm)) {
653 		ddebug(2, "alg: akcipher: Failed to load tfm for %s: %ld\n",
654 		       driver, PTR_ERR(tfm));
655 		return PTR_ERR(tfm);
656 	}
657 
658 	req = akcipher_request_alloc(tfm, GFP_KERNEL);
659 	if (!req) {
660 		ddebug(2, "akcipher_request_alloc failed\n");
661 		ret = -ENOMEM;
662 		goto exit;
663 	}
664 
665 	key = kzalloc(rop->key_len, GFP_KERNEL);
666 	if (!key) {
667 		ret = -ENOMEM;
668 		goto exit;
669 	}
670 
671 	if (unlikely(copy_from_user(key, u64_to_user_ptr(rop->key), rop->key_len))) {
672 		ret = -EFAULT;
673 		goto exit;
674 	}
675 
676 	in = kzalloc(rop->in_len, GFP_KERNEL);
677 	if (!in) {
678 		ret = -ENOMEM;
679 		goto exit;
680 	}
681 
682 	if (unlikely(copy_from_user(in, u64_to_user_ptr(rop->in), rop->in_len))) {
683 		ret = -EFAULT;
684 		goto exit;
685 	}
686 
687 	if (is_priv_key)
688 		ret = crypto_akcipher_set_priv_key(tfm, key, rop->key_len);
689 	else
690 		ret = crypto_akcipher_set_pub_key(tfm, key, rop->key_len);
691 	if (ret) {
692 		derr(1, "crypto_akcipher_set_%s_key error[%d]",
693 		     is_priv_key ? "priv" : "pub", ret);
694 		ret = -ENOKEY;
695 		goto exit;
696 	}
697 
698 	out_len_max = crypto_akcipher_maxsize(tfm);
699 	out = kzalloc(out_len_max, GFP_KERNEL);
700 	if (!out) {
701 		ret = -ENOMEM;
702 		goto exit;
703 	}
704 
705 	sg_init_one(&src, in, rop->in_len);
706 	sg_init_one(&dst, out, out_len_max);
707 
708 	crypto_init_wait(&wait);
	/* completion callback lets crypto_wait_req() handle async completion */
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
709 	akcipher_request_set_crypt(req, &src, &dst, rop->in_len, out_len_max);
710 
711 	switch (rop->op) {
712 	case AOP_ENCRYPT:
713 		ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);
714 		break;
715 	case AOP_DECRYPT:
716 		ret = crypto_wait_req(crypto_akcipher_decrypt(req), &wait);
717 		break;
718 	default:
719 		derr(1, "unknown ops %x", rop->op);
720 		ret = -EINVAL;
721 		break;
722 	}
723 
724 	if (ret) {
725 		derr(1, "alg: akcipher: failed %d\n", ret);
726 		goto exit;
727 	}
728 
729 	if (unlikely(copy_to_user(u64_to_user_ptr(rop->out), out, req->dst_len))) {
730 		derr(1, "Cannot copy to userspace");
731 		ret = -EFAULT;
732 		goto exit;
733 	}
734 
735 	rop->out_len = req->dst_len;
736 exit:
737 	kfree(out);
738 	kfree(in);
739 	kfree(key);
740 	akcipher_request_free(req);
741 	crypto_free_akcipher(tfm);
742 
743 	return ret;
744 }
745 
746 /* Typical AEAD (i.e. GCM) encryption/decryption.
747  * During decryption the tag is verified.
748  */
749 static int rk_auth_fd_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_fd_op *kcaop,
750 			      struct scatterlist *auth_sg, uint32_t auth_len,
751 			      struct scatterlist *src_sg,
752 			      struct scatterlist *dst_sg, uint32_t len)
753 {
754 	int ret;
755 	struct crypt_auth_fd_op *caop = &kcaop->caop;
756 	int max_tag_len;
757 
758 	max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
759 	if (unlikely(caop->tag_len > max_tag_len)) {
760 		derr(0, "Illegal tag length: %d", caop->tag_len);
761 		return -EINVAL;
762 	}
763 
764 	if (caop->tag_len)
765 		cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
766 	else
767 		caop->tag_len = max_tag_len;
768 
769 	cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);
770 
771 	if (caop->op == COP_ENCRYPT) {
772 		ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
773 					       src_sg, dst_sg, len);
774 		if (unlikely(ret)) {
775 			derr(0, "cryptodev_cipher_encrypt: %d", ret);
776 			return ret;
777 		}
778 	} else {
779 		ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
780 					       src_sg, dst_sg, len);
781 
782 		if (unlikely(ret)) {
783 			derr(0, "cryptodev_cipher_decrypt: %d", ret);
784 			return ret;
785 		}
786 	}
787 
788 	return 0;
789 }
790 
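/*
 * sg_init_table_set_page - build a scatterlist whose first entry aliases the
 * first segment of @sgl_src (same page, offset and DMA address) but carries
 * length @len, so it can be chained with the auth and tag scatterlists below.
 */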
791 static void sg_init_table_set_page(struct scatterlist *sgl_dst, unsigned int nents_dst,
792 				   struct scatterlist *sgl_src, unsigned int len)
793 {
794 	sg_init_table(sgl_dst, nents_dst);
795 	sg_set_page(sgl_dst, sg_page(sgl_src), len, sgl_src->offset);
796 
797 	sg_dma_address(sgl_dst) = sg_dma_address(sgl_src);
798 	sg_dma_len(sgl_dst)     = len;
799 }
800 
801 /* This is the main crypto function - zero-copy edition */
802 static int crypto_auth_fd_zc_rk(struct fcrypt *fcr, struct csession *ses_ptr,
803 				  struct kernel_crypt_auth_fd_op *kcaop)
804 {
805 	struct crypt_auth_fd_op *caop = &kcaop->caop;
806 	struct dma_buf *dma_buf_in = NULL, *dma_buf_out = NULL, *dma_buf_auth = NULL;
807 	struct sg_table *sg_tbl_in = NULL, *sg_tbl_out = NULL, *sg_tbl_auth = NULL;
808 	struct dma_buf_attachment *dma_attach_in = NULL, *dma_attach_out = NULL;
809 	struct dma_buf_attachment *dma_attach_auth = NULL;
810 	struct dma_fd_map_node *node_src = NULL, *node_dst = NULL, *node_auth = NULL;
811 	struct scatterlist *dst_sg, *src_sg;
812 	struct scatterlist auth_src[2], auth_dst[2], src[2], dst[2], tag[2];
813 	unsigned char *tag_buf = NULL;
814 	int ret = 0;
815 
816 	node_src = dma_fd_find_node(fcr, caop->src_fd);
817 	if (node_src) {
818 		sg_tbl_in = node_src->sgtbl;
819 	} else {
820 		ret = get_dmafd_sgtbl(caop->src_fd, caop->len, DMA_TO_DEVICE,
821 				      &sg_tbl_in, &dma_attach_in, &dma_buf_in);
822 		if (unlikely(ret)) {
823 			derr(1, "Error get_dmafd_sgtbl src.");
824 			goto exit;
825 		}
826 	}
827 
828 	node_dst = dma_fd_find_node(fcr, caop->dst_fd);
829 	if (node_dst) {
830 		sg_tbl_out = node_dst->sgtbl;
831 	} else {
832 		ret = get_dmafd_sgtbl(caop->dst_fd, caop->len, DMA_FROM_DEVICE,
833 				      &sg_tbl_out, &dma_attach_out, &dma_buf_out);
834 		if (unlikely(ret)) {
835 			derr(1, "Error get_dmafd_sgtbl dst.");
836 			goto exit;
837 		}
838 	}
839 
840 	src_sg = sg_tbl_in->sgl;
841 	dst_sg = sg_tbl_out->sgl;
842 
843 	if (caop->auth_len > 0) {
844 		node_auth = dma_fd_find_node(fcr, caop->auth_fd);
845 		if (node_auth) {
846 			sg_tbl_auth = node_auth->sgtbl;
847 		} else {
848 			ret = get_dmafd_sgtbl(caop->auth_fd, caop->auth_len, DMA_TO_DEVICE,
849 					      &sg_tbl_auth, &dma_attach_auth, &dma_buf_auth);
850 			if (unlikely(ret)) {
851 				derr(1, "Error get_dmafd_sgtbl auth.");
852 				goto exit;
853 			}
854 		}
855 
856 		sg_init_table_set_page(auth_src, ARRAY_SIZE(auth_src),
857 				       sg_tbl_auth->sgl, caop->auth_len);
858 
859 		sg_init_table_set_page(auth_dst, ARRAY_SIZE(auth_dst),
860 				       sg_tbl_auth->sgl, caop->auth_len);
861 
862 		sg_init_table_set_page(src, ARRAY_SIZE(src),
863 				       sg_tbl_in->sgl, caop->len);
864 
865 		sg_init_table_set_page(dst, ARRAY_SIZE(dst),
866 				       sg_tbl_out->sgl, caop->len);
867 
868 		sg_chain(auth_src, 2, src);
869 		sg_chain(auth_dst, 2, dst);
870 		src_sg = auth_src;
871 		dst_sg = auth_dst;
872 	}
873 
874 	/* get tag */
875 	if (caop->tag && caop->tag_len > 0) {
876 		tag_buf = kcalloc(caop->tag_len, sizeof(*tag_buf), GFP_KERNEL);
877 		if (unlikely(!tag_buf)) {
878 			derr(1, "unable to kcalloc %d.", caop->tag_len);
879 			ret = -EFAULT;
880 			goto exit;
881 		}
882 
883 		ret = copy_from_user(tag_buf, u64_to_user_ptr((u64)caop->tag), caop->tag_len);
884 		if (unlikely(ret)) {
885 			derr(1, "unable to copy tag data from userspace.");
886 			ret = -EFAULT;
887 			goto exit;
888 		}
889 
890 		sg_init_table(tag, 2);
891 		sg_set_buf(tag, tag_buf, caop->tag_len);
892 
893 		if (caop->op == COP_ENCRYPT)
894 			sg_chain(dst, 2, tag);
895 		else
896 			sg_chain(src, 2, tag);
897 	}
898 
899 	if (caop->op == COP_ENCRYPT)
900 		ret = rk_auth_fd_n_crypt(ses_ptr, kcaop, NULL, caop->auth_len,
901 					 src_sg, dst_sg, caop->len);
902 	else
903 		ret = rk_auth_fd_n_crypt(ses_ptr, kcaop, NULL, caop->auth_len,
904 					 src_sg, dst_sg, caop->len + caop->tag_len);
905 
906 	if (!ret && caop->op == COP_ENCRYPT && tag_buf) {
907 		ret = copy_to_user(u64_to_user_ptr((u64)kcaop->caop.tag), tag_buf, caop->tag_len);
908 		if (unlikely(ret)) {
909 			derr(1, "Error in copying to userspace");
910 			ret = -EFAULT;
911 			goto exit;
912 		}
913 	}
914 
915 exit:
916 	kfree(tag_buf);
917 
918 	if (dma_buf_in)
919 		put_dmafd_sgtbl(caop->src_fd, DMA_TO_DEVICE,
920 				sg_tbl_in, dma_attach_in, dma_buf_in);
921 
922 	if (dma_buf_out)
923 		put_dmafd_sgtbl(caop->dst_fd, DMA_FROM_DEVICE,
924 				sg_tbl_out, dma_attach_out, dma_buf_out);
925 
926 	if (dma_buf_auth)
927 		put_dmafd_sgtbl(caop->auth_fd, DMA_TO_DEVICE,
928 				sg_tbl_auth, dma_attach_auth, dma_buf_auth);
929 
930 	return ret;
931 }
932 
933 static int __crypto_auth_fd_run_zc(struct fcrypt *fcr, struct csession *ses_ptr,
934 				   struct kernel_crypt_auth_fd_op *kcaop)
935 {
936 	struct crypt_auth_fd_op *caop = &kcaop->caop;
937 	int ret;
938 
939 	if (caop->flags & COP_FLAG_AEAD_RK_TYPE)
940 		ret = crypto_auth_fd_zc_rk(fcr, ses_ptr, kcaop);
941 	else
942 		ret = -EINVAL; /* other types, not implemented */
943 
944 	return ret;
945 }
946 
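/*
 * crypto_auth_fd_run - top half of the dma-fd AEAD ioctl: validate the
 * operation and session, reset any hash state, program the IV, then run the
 * zero-copy AEAD path and read the IV back.
 */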
947 static int crypto_auth_fd_run(struct fcrypt *fcr, struct kernel_crypt_auth_fd_op *kcaop)
948 {
949 	struct csession *ses_ptr;
950 	struct crypt_auth_fd_op *caop = &kcaop->caop;
951 	int ret = -EINVAL;
952 
953 	if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
954 		ddebug(1, "invalid operation op=%u", caop->op);
955 		return -EINVAL;
956 	}
957 
958 	/* this also enters ses_ptr->sem */
959 	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
960 	if (unlikely(!ses_ptr)) {
961 		derr(1, "invalid session ID=0x%08X", caop->ses);
962 		return -EINVAL;
963 	}
964 
965 	if (unlikely(ses_ptr->cdata.init == 0)) {
966 		derr(1, "cipher context not initialized");
967 		ret = -EINVAL;
968 		goto out_unlock;
969 	}
970 
971 	/* If we have a hash/mac handle reset its state */
972 	if (ses_ptr->hdata.init != 0) {
973 		ret = cryptodev_hash_reset(&ses_ptr->hdata);
974 		if (unlikely(ret)) {
975 			derr(1, "error in cryptodev_hash_reset()");
976 			goto out_unlock;
977 		}
978 	}
979 
980 	cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
981 				min(ses_ptr->cdata.ivsize, kcaop->ivlen));
982 
983 	ret = __crypto_auth_fd_run_zc(fcr, ses_ptr, kcaop);
984 	if (unlikely(ret)) {
985 		derr(1, "error in __crypto_auth_fd_run_zc()");
986 		goto out_unlock;
987 	}
988 
989 	ret = 0;
990 
991 	cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
992 				min(ses_ptr->cdata.ivsize, kcaop->ivlen));
993 
994 out_unlock:
995 	crypto_put_session(ses_ptr);
996 	return ret;
997 }
998 
999 /*
1000  * Return the tag (digest) length for authenticated encryption.
1001  * If the cipher and digest are separate, hdata.init is set - just return the
1002  * hash digest length. Otherwise return the tag size of the AEAD cipher.
1003  */
1004 static int rk_cryptodev_get_tag_len(struct csession *ses_ptr)
1005 {
1006 	if (ses_ptr->hdata.init)
1007 		return ses_ptr->hdata.digestsize;
1008 	else
1009 		return cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
1010 }
1011 
1012 /*
1013  * Calculate destination buffer length for authenticated encryption. The
1014  * expectation is that user-space code allocates exactly the same space for
1015  * destination buffer before calling cryptodev. The result is cipher-dependent.
1016  */
1017 static int rk_cryptodev_fd_get_dst_len(struct crypt_auth_fd_op *caop, struct csession *ses_ptr)
1018 {
1019 	int dst_len = caop->len;
1020 
1021 	if (caop->op == COP_DECRYPT)
1022 		return dst_len;
1023 
1024 	dst_len += caop->tag_len;
1025 
1026 	/* for TLS always add some padding so the total length is rounded to
1027 	 * cipher block size
1028 	 */
1029 	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE) {
1030 		int bs = ses_ptr->cdata.blocksize;
1031 
1032 		dst_len += bs - (dst_len % bs);
1033 	}
1034 
1035 	return dst_len;
1036 }
1037 
1038 static int fill_kcaop_fd_from_caop(struct kernel_crypt_auth_fd_op *kcaop, struct fcrypt *fcr)
1039 {
1040 	struct crypt_auth_fd_op *caop = &kcaop->caop;
1041 	struct csession *ses_ptr;
1042 	int ret;
1043 
1044 	/* this also enters ses_ptr->sem */
1045 	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
1046 	if (unlikely(!ses_ptr)) {
1047 		derr(1, "invalid session ID=0x%08X", caop->ses);
1048 		return -EINVAL;
1049 	}
1050 
1051 	if (caop->tag_len == 0)
1052 		caop->tag_len = rk_cryptodev_get_tag_len(ses_ptr);
1053 
1054 	kcaop->ivlen   = caop->iv ? ses_ptr->cdata.ivsize : 0;
1055 	kcaop->dst_len = rk_cryptodev_fd_get_dst_len(caop, ses_ptr);
1056 	kcaop->task    = current;
1057 	kcaop->mm      = current->mm;
1058 
1059 	if (caop->iv) {
1060 		ret = copy_from_user(kcaop->iv, u64_to_user_ptr((u64)caop->iv), kcaop->ivlen);
1061 		if (unlikely(ret)) {
1062 			derr(1, "error copy_from_user IV (%d bytes) returned %d for address %llu",
1063 			     kcaop->ivlen, ret, caop->iv);
1064 			ret = -EFAULT;
1065 			goto out_unlock;
1066 		}
1067 	}
1068 
1069 	ret = 0;
1070 
1071 out_unlock:
1072 	crypto_put_session(ses_ptr);
1073 	return ret;
1074 }
1075 
1076 static int fill_caop_fd_from_kcaop(struct kernel_crypt_auth_fd_op *kcaop, struct fcrypt *fcr)
1077 {
1078 	int ret;
1079 
1080 	kcaop->caop.len = kcaop->dst_len;
1081 
1082 	if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
1083 		ret = copy_to_user(u64_to_user_ptr((u64)kcaop->caop.iv), kcaop->iv, kcaop->ivlen);
1084 		if (unlikely(ret)) {
1085 			derr(1, "Error in copying iv to userspace");
1086 			return -EFAULT;
1087 		}
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 static int kcaop_fd_from_user(struct kernel_crypt_auth_fd_op *kcaop,
1094 			      struct fcrypt *fcr, void __user *arg)
1095 {
1096 	if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
1097 		derr(1, "Error in copying from userspace");
1098 		return -EFAULT;
1099 	}
1100 
1101 	return fill_kcaop_fd_from_caop(kcaop, fcr);
1102 }
1103 
1104 static int kcaop_fd_to_user(struct kernel_crypt_auth_fd_op *kcaop,
1105 			    struct fcrypt *fcr, void __user *arg)
1106 {
1107 	int ret;
1108 
1109 	ret = fill_caop_fd_from_kcaop(kcaop, fcr);
1110 	if (unlikely(ret)) {
1111 		derr(1, "Error in fill_caop_from_kcaop");
1112 		return ret;
1113 	}
1114 
1115 	if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
1116 		derr(1, "Cannot copy to userspace");
1117 		return -EFAULT;
1118 	}
1119 
1120 	return 0;
1121 }
1122 
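/*
 * rk_cryptodev_ioctl - dispatcher for the Rockchip-specific ioctls: dma-fd
 * based cipher/hash (RIOCCRYPT_FD), AEAD (RIOCAUTHCRYPT_FD), dma-buf
 * map/unmap, CPU/device cache access and RSA.
 *
 * A minimal user-space sketch, assuming a cryptodev session has already been
 * set up and that in_fd/out_fd are dma-buf fds (names are illustrative only):
 *
 *	struct crypt_fd_map_op mop = { .dma_fd = in_fd };
 *	struct crypt_fd_op cop = { .ses = ses, .op = COP_ENCRYPT, .len = len,
 *				   .src_fd = in_fd, .dst_fd = out_fd };
 *
 *	ioctl(cfd, RIOCCRYPT_FD_MAP, &mop);	map once, mop.phys_addr returned
 *	ioctl(cfd, RIOCCRYPT_FD, &cop);		cipher runs on the dma-bufs
 *	ioctl(cfd, RIOCCRYPT_FD_UNMAP, &mop);	drop the cached mapping
 */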
1123 long
1124 rk_cryptodev_ioctl(struct fcrypt *fcr, unsigned int cmd, unsigned long arg_)
1125 {
1126 	struct kernel_crypt_fd_op kcop;
1127 	struct kernel_crypt_fd_map_op kmop;
1128 	struct kernel_crypt_rsa_op krop;
1129 	struct kernel_crypt_auth_fd_op kcaop;
1130 	void __user *arg = (void __user *)arg_;
1131 	int ret;
1132 
1133 	switch (cmd) {
1134 	case RIOCCRYPT_FD:
1135 		ret = kcop_fd_from_user(&kcop, fcr, arg);
1136 		if (unlikely(ret)) {
1137 			dwarning(1, "Error copying from user");
1138 			return ret;
1139 		}
1140 
1141 		ret = crypto_fd_run(fcr, &kcop);
1142 		if (unlikely(ret)) {
1143 			dwarning(1, "Error in crypto_run");
1144 			return ret;
1145 		}
1146 
1147 		return kcop_fd_to_user(&kcop, fcr, arg);
1148 	case RIOCAUTHCRYPT_FD:
1149 		ret = kcaop_fd_from_user(&kcaop, fcr, arg);
1150 		if (unlikely(ret)) {
1151 			dwarning(1, "Error copying from user");
1152 			return ret;
1153 		}
1154 
1155 		ret = crypto_auth_fd_run(fcr, &kcaop);
1156 		if (unlikely(ret)) {
1157 			dwarning(1, "Error in crypto_run");
1158 			return ret;
1159 		}
1160 
1161 		return kcaop_fd_to_user(&kcaop, fcr, arg);
1162 	case RIOCCRYPT_FD_MAP:
1163 		ret = kcop_map_fd_from_user(&kmop, fcr, arg);
1164 		if (unlikely(ret)) {
1165 			dwarning(1, "Error copying from user");
1166 			return ret;
1167 		}
1168 
1169 		ret = dma_fd_map_for_user(fcr, &kmop);
1170 		if (unlikely(ret)) {
1171 			dwarning(1, "Error in dma_fd_map_for_user");
1172 			return ret;
1173 		}
1174 
1175 		return kcop_map_fd_to_user(&kmop, fcr, arg);
1176 	case RIOCCRYPT_FD_UNMAP:
1177 		ret = kcop_map_fd_from_user(&kmop, fcr, arg);
1178 		if (unlikely(ret)) {
1179 			dwarning(1, "Error copying from user");
1180 			return ret;
1181 		}
1182 
1183 		ret = dma_fd_unmap_for_user(fcr, &kmop);
1184 		if (unlikely(ret))
1185 			dwarning(1, "Error in dma_fd_unmap_for_user");
1186 
1187 		return ret;
1188 	case RIOCCRYPT_CPU_ACCESS:
1189 		ret = kcop_map_fd_from_user(&kmop, fcr, arg);
1190 		if (unlikely(ret)) {
1191 			dwarning(1, "Error copying from user");
1192 			return ret;
1193 		}
1194 
1195 		ret = dma_fd_begin_cpu_access(fcr, &kmop);
1196 		if (unlikely(ret))
1197 			dwarning(1, "Error in dma_fd_begin_cpu_access");
1198 
1199 		return ret;
1200 	case RIOCCRYPT_DEV_ACCESS:
1201 		ret = kcop_map_fd_from_user(&kmop, fcr, arg);
1202 		if (unlikely(ret)) {
1203 			dwarning(1, "Error copying from user");
1204 			return ret;
1205 		}
1206 
1207 		ret = dma_fd_end_cpu_access(fcr, &kmop);
1208 		if (unlikely(ret))
1209 			dwarning(1, "Error in dma_fd_end_cpu_access");
1210 
1211 		return ret;
1212 	case RIOCCRYPT_RSA_CRYPT:
1213 		ret = kcop_rsa_from_user(&krop, fcr, arg);
1214 		if (unlikely(ret)) {
1215 			dwarning(1, "Error copying from user");
1216 			return ret;
1217 		}
1218 
1219 		ret = crypto_rsa_run(fcr, &krop);
1220 		if (unlikely(ret)) {
1221 			dwarning(1, "Error in rsa_run");
1222 			return ret;
1223 		}
1224 
1225 		return kcop_rsa_to_user(&krop, fcr, arg);
1226 	default:
1227 		return -EINVAL;
1228 	}
1229 }
1230 
1231 /* compatibility code for 32bit userlands */
1232 #ifdef CONFIG_COMPAT
1233 
1234 static inline void
1235 compat_to_crypt_fd_op(struct compat_crypt_fd_op *compat, struct crypt_fd_op *cop)
1236 {
1237 	cop->ses    = compat->ses;
1238 	cop->op     = compat->op;
1239 	cop->flags  = compat->flags;
1240 	cop->len    = compat->len;
1241 
1242 	cop->src_fd = compat->src_fd;
1243 	cop->dst_fd = compat->dst_fd;
1244 	cop->mac    = compat_ptr(compat->mac);
1245 	cop->iv     = compat_ptr(compat->iv);
1246 }
1247 
1248 static inline void
1249 crypt_fd_op_to_compat(struct crypt_fd_op *cop, struct compat_crypt_fd_op *compat)
1250 {
1251 	compat->ses    = cop->ses;
1252 	compat->op     = cop->op;
1253 	compat->flags  = cop->flags;
1254 	compat->len    = cop->len;
1255 
1256 	compat->src_fd = cop->src_fd;
1257 	compat->dst_fd = cop->dst_fd;
1258 	compat->mac    = ptr_to_compat(cop->mac);
1259 	compat->iv     = ptr_to_compat(cop->iv);
1260 }
1261 
1262 static int compat_kcop_fd_from_user(struct kernel_crypt_fd_op *kcop,
1263 				    struct fcrypt *fcr, void __user *arg)
1264 {
1265 	struct compat_crypt_fd_op compat_cop;
1266 
1267 	if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop))))
1268 		return -EFAULT;
1269 	compat_to_crypt_fd_op(&compat_cop, &kcop->cop);
1270 
1271 	return fill_kcop_fd_from_cop(kcop, fcr);
1272 }
1273 
1274 static int compat_kcop_fd_to_user(struct kernel_crypt_fd_op *kcop,
1275 				  struct fcrypt *fcr, void __user *arg)
1276 {
1277 	int ret;
1278 	struct compat_crypt_fd_op compat_cop;
1279 
1280 	ret = fill_cop_fd_from_kcop(kcop, fcr);
1281 	if (unlikely(ret)) {
1282 		dwarning(1, "Error in fill_cop_from_kcop");
1283 		return ret;
1284 	}
1285 	crypt_fd_op_to_compat(&kcop->cop, &compat_cop);
1286 
1287 	if (unlikely(copy_to_user(arg, &compat_cop, sizeof(compat_cop)))) {
1288 		dwarning(1, "Error copying to user");
1289 		return -EFAULT;
1290 	}
1291 	return 0;
1292 }
1293 
1294 static inline void
1295 compat_to_crypt_fd_map_op(struct compat_crypt_fd_map_op *compat, struct crypt_fd_map_op *mop)
1296 {
1297 	mop->dma_fd    = compat->dma_fd;
1298 	mop->phys_addr = compat->phys_addr;
1299 }
1300 
1301 static inline void
1302 crypt_fd_map_op_to_compat(struct crypt_fd_map_op *mop, struct compat_crypt_fd_map_op *compat)
1303 {
1304 	compat->dma_fd    = mop->dma_fd;
1305 	compat->phys_addr = mop->phys_addr;
1306 }
1307 
1308 static int compat_kcop_map_fd_from_user(struct kernel_crypt_fd_map_op *kcop,
1309 			struct fcrypt *fcr, void __user *arg)
1310 {
1311 	struct compat_crypt_fd_map_op compat_mop;
1312 
1313 	if (unlikely(copy_from_user(&compat_mop, arg, sizeof(compat_mop))))
1314 		return -EFAULT;
1315 
1316 	compat_to_crypt_fd_map_op(&compat_mop, &kcop->mop);
1317 
1318 	return 0;
1319 }
1320 
1321 static int compat_kcop_map_fd_to_user(struct kernel_crypt_fd_map_op *kcop,
1322 			   struct fcrypt *fcr, void __user *arg)
1323 {
1324 	struct compat_crypt_fd_map_op compat_mop;
1325 
1326 	crypt_fd_map_op_to_compat(&kcop->mop, &compat_mop);
1327 	if (unlikely(copy_to_user(arg, &compat_mop, sizeof(compat_mop)))) {
1328 		derr(1, "Cannot copy to userspace");
1329 		return -EFAULT;
1330 	}
1331 
1332 	return 0;
1333 }
1334 
1335 long
1336 rk_compat_cryptodev_ioctl(struct fcrypt *fcr, unsigned int cmd, unsigned long arg_)
1337 {
1338 	struct kernel_crypt_fd_op kcop;
1339 	struct kernel_crypt_fd_map_op kmop;
1340 	void __user *arg = (void __user *)arg_;
1341 	int ret;
1342 
1343 	switch (cmd) {
1344 	case COMPAT_RIOCCRYPT_FD:
1345 		ret = compat_kcop_fd_from_user(&kcop, fcr, arg);
1346 		if (unlikely(ret)) {
1347 			dwarning(1, "Error copying from user");
1348 			return ret;
1349 		}
1350 
1351 		ret = crypto_fd_run(fcr, &kcop);
1352 		if (unlikely(ret)) {
1353 			dwarning(1, "Error in crypto_run");
1354 			return ret;
1355 		}
1356 
1357 		return compat_kcop_fd_to_user(&kcop, fcr, arg);
1358 	case COMPAT_RIOCCRYPT_FD_MAP:
1359 		ret = compat_kcop_map_fd_from_user(&kmop, fcr, arg);
1360 		if (unlikely(ret)) {
1361 			dwarning(1, "Error copying from user");
1362 			return ret;
1363 		}
1364 
1365 		ret = dma_fd_map_for_user(fcr, &kmop);
1366 		if (unlikely(ret)) {
1367 			dwarning(1, "Error in dma_fd_map_for_user");
1368 			return ret;
1369 		}
1370 
1371 		return compat_kcop_map_fd_to_user(&kmop, fcr, arg);
1372 	case COMPAT_RIOCCRYPT_FD_UNMAP:
1373 		ret = compat_kcop_map_fd_from_user(&kmop, fcr, arg);
1374 		if (unlikely(ret)) {
1375 			dwarning(1, "Error copying from user");
1376 			return ret;
1377 		}
1378 
1379 		ret = dma_fd_unmap_for_user(fcr, &kmop);
1380 		if (unlikely(ret))
1381 			dwarning(1, "Error in dma_fd_unmap_for_user");
1382 
1383 		return ret;
1384 	case COMPAT_RIOCCRYPT_CPU_ACCESS:
1385 		ret = compat_kcop_map_fd_from_user(&kmop, fcr, arg);
1386 		if (unlikely(ret)) {
1387 			dwarning(1, "Error copying from user");
1388 			return ret;
1389 		}
1390 
1391 		ret = dma_fd_begin_cpu_access(fcr, &kmop);
1392 		if (unlikely(ret)) {
1393 			dwarning(1, "Error in dma_fd_begin_cpu_access");
1394 			return ret;
1395 		}
1396 
1397 		return compat_kcop_map_fd_to_user(&kmop, fcr, arg);
1398 	case COMPAT_RIOCCRYPT_DEV_ACCESS:
1399 		ret = compat_kcop_map_fd_from_user(&kmop, fcr, arg);
1400 		if (unlikely(ret)) {
1401 			dwarning(1, "Error copying from user");
1402 			return ret;
1403 		}
1404 
1405 		ret = dma_fd_end_cpu_access(fcr, &kmop);
1406 		if (unlikely(ret))
1407 			dwarning(1, "Error in dma_fd_end_cpu_access");
1408 
1409 		return ret;
1410 	default:
1411 		return rk_cryptodev_ioctl(fcr, cmd, arg_);
1412 	}
1413 }
1414 
1415 #endif /* CONFIG_COMPAT */
1416 
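/*
 * Mapping tables from the CRYPTO_RK_* algorithm identifiers to the
 * corresponding Rockchip crypto driver names, together with per-algorithm
 * flags (stream/AEAD for ciphers, HMAC for hashes).
 */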
1417 struct cipher_algo_name_map {
1418 	uint32_t	id;
1419 	const char	*name;
1420 	int		is_stream;
1421 	int		is_aead;
1422 };
1423 
1424 struct hash_algo_name_map {
1425 	uint32_t	id;
1426 	const char	*name;
1427 	int		is_hmac;
1428 };
1429 
1430 static const struct cipher_algo_name_map c_algo_map_tbl[] = {
1431 	{CRYPTO_RK_DES_ECB,     "ecb-des-rk",      0, 0},
1432 	{CRYPTO_RK_DES_CBC,     "cbc-des-rk",      0, 0},
1433 	{CRYPTO_RK_DES_CFB,     "cfb-des-rk",      0, 0},
1434 	{CRYPTO_RK_DES_OFB,     "ofb-des-rk",      0, 0},
1435 	{CRYPTO_RK_3DES_ECB,    "ecb-des3_ede-rk", 0, 0},
1436 	{CRYPTO_RK_3DES_CBC,    "cbc-des3_ede-rk", 0, 0},
1437 	{CRYPTO_RK_3DES_CFB,    "cfb-des3_ede-rk", 0, 0},
1438 	{CRYPTO_RK_3DES_OFB,    "ofb-des3_ede-rk", 0, 0},
1439 	{CRYPTO_RK_SM4_ECB,     "ecb-sm4-rk",      0, 0},
1440 	{CRYPTO_RK_SM4_CBC,     "cbc-sm4-rk",      0, 0},
1441 	{CRYPTO_RK_SM4_CFB,     "cfb-sm4-rk",      0, 0},
1442 	{CRYPTO_RK_SM4_OFB,     "ofb-sm4-rk",      0, 0},
1443 	{CRYPTO_RK_SM4_CTS,     "cts-sm4-rk",      0, 0},
1444 	{CRYPTO_RK_SM4_CTR,     "ctr-sm4-rk",      1, 0},
1445 	{CRYPTO_RK_SM4_XTS,     "xts-sm4-rk",      0, 0},
1446 	{CRYPTO_RK_SM4_CCM,     "ccm-sm4-rk",      1, 1},
1447 	{CRYPTO_RK_SM4_GCM,     "gcm-sm4-rk",      1, 1},
1448 	{CRYPTO_RK_AES_ECB,     "ecb-aes-rk",      0, 0},
1449 	{CRYPTO_RK_AES_CBC,     "cbc-aes-rk",      0, 0},
1450 	{CRYPTO_RK_AES_CFB,     "cfb-aes-rk",      0, 0},
1451 	{CRYPTO_RK_AES_OFB,     "ofb-aes-rk",      0, 0},
1452 	{CRYPTO_RK_AES_CTS,     "cts-aes-rk",      0, 0},
1453 	{CRYPTO_RK_AES_CTR,     "ctr-aes-rk",      1, 0},
1454 	{CRYPTO_RK_AES_XTS,     "xts-aes-rk",      0, 0},
1455 	{CRYPTO_RK_AES_CCM,     "ccm-aes-rk",      1, 1},
1456 	{CRYPTO_RK_AES_GCM,     "gcm-aes-rk",      1, 1},
1457 };
1458 
1459 static const struct hash_algo_name_map h_algo_map_tbl[] = {
1460 
1461 	{CRYPTO_RK_MD5,         "md5-rk",         0},
1462 	{CRYPTO_RK_SHA1,        "sha1-rk",        0},
1463 	{CRYPTO_RK_SHA224,      "sha224-rk",      0},
1464 	{CRYPTO_RK_SHA256,      "sha256-rk",      0},
1465 	{CRYPTO_RK_SHA384,      "sha384-rk",      0},
1466 	{CRYPTO_RK_SHA512,      "sha512-rk",      0},
1467 	{CRYPTO_RK_SHA512_224,  "sha512_224-rk",  0},
1468 	{CRYPTO_RK_SHA512_256,  "sha512_256-rk",  0},
1469 	{CRYPTO_RK_SM3,         "sm3-rk",         0},
1470 	{CRYPTO_RK_MD5_HMAC,    "hmac-md5-rk",    1},
1471 	{CRYPTO_RK_SHA1_HMAC,   "hmac-sha1-rk",   1},
1472 	{CRYPTO_RK_SHA256_HMAC, "hmac-sha256-rk", 1},
1473 	{CRYPTO_RK_SHA512_HMAC, "hmac-sha512-rk", 1},
1474 	{CRYPTO_RK_SM3_HMAC,    "hmac-sm3-rk",    1},
1475 	{CRYPTO_RK_SM4_CMAC,    "cmac-sm4-rk",    1},
1476 	{CRYPTO_RK_SM4_CBC_MAC, "cbcmac-sm4-rk",  1},
1477 	{CRYPTO_RK_AES_CMAC,    "cmac-aes-rk",    1},
1478 	{CRYPTO_RK_AES_CBC_MAC, "cbcmac-aes-rk",  1},
1479 };
1480 
1481 const char *rk_get_cipher_name(uint32_t id, int *is_stream, int *is_aead)
1482 {
1483 	uint32_t i;
1484 
1485 	*is_stream  = 0;
1486 	*is_aead    = 0;
1487 
1488 	for (i = 0; i < ARRAY_SIZE(c_algo_map_tbl); i++) {
1489 		if (id == c_algo_map_tbl[i].id) {
1490 			*is_stream = c_algo_map_tbl[i].is_stream;
1491 			*is_aead   = c_algo_map_tbl[i].is_aead;
1492 			return c_algo_map_tbl[i].name;
1493 		}
1494 	}
1495 
1496 	return NULL;
1497 }
1498 
1499 const char *rk_get_hash_name(uint32_t id, int *is_hmac)
1500 {
1501 	uint32_t i;
1502 
1503 	*is_hmac    = 0;
1504 
1505 	for (i = 0; i < ARRAY_SIZE(h_algo_map_tbl); i++) {
1506 		if (id == h_algo_map_tbl[i].id) {
1507 			*is_hmac = h_algo_map_tbl[i].is_hmac;
1508 			return h_algo_map_tbl[i].name;
1509 		}
1510 	}
1511 
1512 	return NULL;
1513 }
1514 
1515 bool rk_cryptodev_multi_thread(const char *name)
1516 {
1517 	uint32_t i;
1518 
1519 	for (i = 0; i < ARRAY_SIZE(g_dev_infos); i++) {
1520 		if (g_dev_infos[i].dev)
1521 			return g_dev_infos[i].is_multi_thread;
1522 	}
1523 
1524 	return false;
1525 }
1526