xref: /OK3568_Linux_fs/kernel/drivers/crypto/rockchip/cryptodev_linux/ioctl.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Driver for /dev/crypto device (aka CryptoDev)
3  *
4  * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
5  * Copyright (c) 2009,2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
6  * Copyright (c) 2010 Phil Sutter
7  *
8  * This file is part of linux cryptodev.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc.,
23  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
24  */
25 
26 /*
27  * Device /dev/crypto provides an interface for
28  * accessing kernel CryptoAPI algorithms (ciphers,
29  * hashes) from userspace programs.
30  *
31  * /dev/crypto interface was originally introduced in
32  * OpenBSD and this module attempts to keep the API.
33  *
34  */
35 
36 #include <crypto/hash.h>
37 #include <linux/mm.h>
38 #include <linux/highmem.h>
39 #include <linux/ioctl.h>
40 #include <linux/random.h>
41 #include <linux/syscalls.h>
42 #include <linux/pagemap.h>
43 #include <linux/poll.h>
44 #include <linux/uaccess.h>
45 #include <linux/scatterlist.h>
46 #include <linux/rtnetlink.h>
47 #include <crypto/authenc.h>
48 
49 #include <linux/sysctl.h>
50 
51 #include "cryptodev.h"
52 #include "zc.h"
53 #include "version.h"
54 #include "cipherapi.h"
55 
56 #include "rk_cryptodev.h"
57 
58 MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
59 MODULE_DESCRIPTION("CryptoDev driver");
60 MODULE_LICENSE("GPL");
61 
/* ====== Compile-time config ====== */

/* Default (pre-allocated) and maximum size of the job queue.
 * These are free, pending and done items all together. */
#define DEF_COP_RINGSIZE 16
#define MAX_COP_RINGSIZE 64

/* ====== Module parameters ====== */

/* Controls how chatty the ddebug()/derr() helpers are. */
int cryptodev_verbosity;
module_param(cryptodev_verbosity, int, 0644);
MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");

/* ====== CryptoAPI ====== */
/* One queued asynchronous job: the operation plus its completion status. */
struct todo_list_item {
	struct list_head __hook;
	struct kernel_crypt_op kcop;
	int result;	/* return value of crypto_run() for this job */
};

/* A list head paired with the mutex that protects it. */
struct locked_list {
	struct list_head list;
	struct mutex lock;
};

/* Per-open-file state: the session table (fcrypt) plus the async job
 * ring, whose items migrate free -> todo -> done -> free. */
struct crypt_priv {
	struct fcrypt fcrypt;
	struct locked_list free, todo, done;
	int itemcount;	/* total items allocated across all three lists */
	struct work_struct cryptask;	/* services the todo list */
	wait_queue_head_t user_waiter;	/* poll()/fetch wakeups */
};

#define FILL_SG(sg, ptr, len)					\
	do {							\
		(sg)->page = virt_to_page(ptr);			\
		(sg)->offset = offset_in_page(ptr);		\
		(sg)->length = len;				\
		(sg)->dma_address = 0;				\
	} while (0)

/* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
static struct workqueue_struct *cryptodev_wq;
/* Single-session token for non-multithreaded hardware: 1 = free, 0 = taken. */
static atomic_t cryptodev_sess = ATOMIC_INIT(1);
106 
/* Prepare session for future use.
 *
 * Maps the user-requested cipher/mac IDs (sop) onto kernel CryptoAPI
 * algorithm names, instantiates the transforms, preallocates the
 * page/scatterlist arrays and links the new session into fcr->list.
 * On success sop->ses is filled with the new random session ID.
 * Returns 0 or a negative errno. */
static int
crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
{
	struct csession	*ses_new = NULL, *ses_ptr;
	int ret = 0;
	const char *alg_name = NULL;
	const char *hash_name = NULL;
	int hmac_mode = 1, stream = 0, aead = 0;
	/*
	 * With composite aead ciphers, only ckey is used and it can cover all the
	 * structure space; otherwise both keys may be used simultaneously but they
	 * are confined to their spaces
	 */
	struct {
		uint8_t ckey[CRYPTO_CIPHER_MAX_KEY_LEN];
		uint8_t mkey[CRYPTO_HMAC_MAX_KEY_LEN];
		/* padding space for aead keys */
		uint8_t pad[RTA_SPACE(sizeof(struct crypto_authenc_key_param))];
	} keys;

	/* Does the request make sense? */
	if (unlikely(!sop->cipher && !sop->mac)) {
		ddebug(1, "Both 'cipher' and 'mac' unset.");
		return -EINVAL;
	}

	/* Translate the cipher ID into a CryptoAPI template name, noting
	 * whether it behaves as a stream cipher and/or an AEAD mode.
	 * Unknown IDs are delegated to the Rockchip-specific lookup. */
	switch (sop->cipher) {
	case 0:
		break;
	case CRYPTO_DES_CBC:
		alg_name = "cbc(des)";
		break;
	case CRYPTO_3DES_CBC:
		alg_name = "cbc(des3_ede)";
		break;
	case CRYPTO_BLF_CBC:
		alg_name = "cbc(blowfish)";
		break;
	case CRYPTO_AES_CBC:
		alg_name = "cbc(aes)";
		break;
	case CRYPTO_AES_ECB:
		alg_name = "ecb(aes)";
		break;
	case CRYPTO_AES_XTS:
		alg_name = "xts(aes)";
		break;
	case CRYPTO_CAMELLIA_CBC:
		alg_name = "cbc(camellia)";
		break;
	case CRYPTO_AES_CTR:
		alg_name = "ctr(aes)";
		stream = 1;
		break;
	case CRYPTO_AES_GCM:
		alg_name = "gcm(aes)";
		stream = 1;
		aead = 1;
		break;
	case CRYPTO_TLS11_AES_CBC_HMAC_SHA1:
		alg_name = "tls11(hmac(sha1),cbc(aes))";
		stream = 0;
		aead = 1;
		break;
	case CRYPTO_TLS12_AES_CBC_HMAC_SHA256:
		alg_name = "tls12(hmac(sha256),cbc(aes))";
		stream = 0;
		aead = 1;
		break;
	case CRYPTO_NULL:
		alg_name = "ecb(cipher_null)";
		stream = 1;
		break;
	default:
		alg_name = rk_get_cipher_name(sop->cipher, &stream, &aead);
		if (!alg_name) {
			ddebug(1, "bad cipher: %d", sop->cipher);
			return -EINVAL;
		}
		break;
	}

	/* Same translation for the hash/MAC ID; hmac_mode distinguishes
	 * keyed HMAC transforms from plain digests. */
	switch (sop->mac) {
	case 0:
		break;
	case CRYPTO_MD5_HMAC:
		hash_name = "hmac(md5)";
		break;
	case CRYPTO_RIPEMD160_HMAC:
		hash_name = "hmac(rmd160)";
		break;
	case CRYPTO_SHA1_HMAC:
		hash_name = "hmac(sha1)";
		break;
	case CRYPTO_SHA2_224_HMAC:
		hash_name = "hmac(sha224)";
		break;

	case CRYPTO_SHA2_256_HMAC:
		hash_name = "hmac(sha256)";
		break;
	case CRYPTO_SHA2_384_HMAC:
		hash_name = "hmac(sha384)";
		break;
	case CRYPTO_SHA2_512_HMAC:
		hash_name = "hmac(sha512)";
		break;

	/* non-hmac cases */
	case CRYPTO_MD5:
		hash_name = "md5";
		hmac_mode = 0;
		break;
	case CRYPTO_RIPEMD160:
		hash_name = "rmd160";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA1:
		hash_name = "sha1";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_224:
		hash_name = "sha224";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_256:
		hash_name = "sha256";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_384:
		hash_name = "sha384";
		hmac_mode = 0;
		break;
	case CRYPTO_SHA2_512:
		hash_name = "sha512";
		hmac_mode = 0;
		break;
	default:
		hash_name = rk_get_hash_name(sop->mac, &hmac_mode);
		if (!hash_name) {
			ddebug(1, "bad mac: %d", sop->mac);
			return -EINVAL;
		}
		break;
	}

	/* Create a session and put it to the list. Zeroing the structure helps
	 * also with a single exit point in case of errors */
	ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
	if (!ses_new)
		return -ENOMEM;

	/* Set-up crypto transform. */
	if (alg_name) {
		unsigned int keylen;
		ret = cryptodev_get_cipher_keylen(&keylen, sop, aead);
		if (unlikely(ret < 0)) {
			ddebug(1, "Setting key failed for %s-%zu.",
				alg_name, (size_t)sop->keylen*8);
			goto session_error;
		}

		/* Copies the user key (and AEAD key material) into keys.ckey. */
		ret = cryptodev_get_cipher_key(keys.ckey, sop, aead);
		if (unlikely(ret < 0))
			goto session_error;

		ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keys.ckey,
						keylen, stream, aead);
		if (ret < 0) {
			ddebug(1, "Failed to load cipher for %s", alg_name);
			goto session_error;
		}
	}

	/* For composite AEAD modes the hash is part of the cipher transform,
	 * hence the standalone hash is only set up when aead == 0. */
	if (hash_name && aead == 0) {
		if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
			ddebug(1, "Setting key failed for %s-%zu.",
				hash_name, (size_t)sop->mackeylen*8);
			ret = -EINVAL;
			goto session_error;
		}

		if (sop->mackey && unlikely(copy_from_user(keys.mkey, sop->mackey,
					    sop->mackeylen))) {
			ret = -EFAULT;
			goto session_error;
		}

		ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
							keys.mkey, sop->mackeylen);
		if (ret != 0) {
			ddebug(1, "Failed to load hash for %s", hash_name);
			goto session_error;
		}

		ret = cryptodev_hash_reset(&ses_new->hdata);
		if (ret != 0) {
			goto session_error;
		}
	}

	/* The stricter of the two transforms' alignment requirements wins. */
	ses_new->alignmask = max(ses_new->cdata.alignmask,
	                                          ses_new->hdata.alignmask);
	ddebug(2, "got alignmask %d", ses_new->alignmask);

	/* Preallocate page-pinning and scatterlist arrays for zero-copy I/O. */
	ses_new->array_size = DEFAULT_PREALLOC_PAGES;
	ddebug(2, "preallocating for %d user pages", ses_new->array_size);
	ses_new->pages = kzalloc(ses_new->array_size *
			sizeof(struct page *), GFP_KERNEL);
	ses_new->sg = kzalloc(ses_new->array_size *
			sizeof(struct scatterlist), GFP_KERNEL);
	if (ses_new->sg == NULL || ses_new->pages == NULL) {
		ddebug(0, "Memory error");
		ret = -ENOMEM;
		goto session_error;
	}

	/* Non-multithreaded can only create one session */
	if (!rk_cryptodev_multi_thread(NULL) &&
	    !atomic_dec_and_test(&cryptodev_sess)) {
		/* Lost the race: put the token back and refuse. */
		atomic_inc(&cryptodev_sess);
		ddebug(2, "Non-multithreaded can only create one session. sess = %d",
		       atomic_read(&cryptodev_sess));
		ret = -EBUSY;
		goto session_error;
	}

	/* put the new session to the list */
	get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
	mutex_init(&ses_new->sem);

	mutex_lock(&fcr->sem);
restart:
	list_for_each_entry(ses_ptr, &fcr->list, entry) {
		/* Check for duplicate SID */
		if (unlikely(ses_new->sid == ses_ptr->sid)) {
			get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
			/* Unless we have a broken RNG this
			   shouldn't loop forever... ;-) */
			goto restart;
		}
	}

	list_add(&ses_new->entry, &fcr->list);
	mutex_unlock(&fcr->sem);

	/* Fill in some values for the user. */
	sop->ses = ses_new->sid;
	return 0;

	/* We count on ses_new to be initialized with zeroes
	 * Since hdata and cdata are embedded within ses_new, it follows that
	 * hdata->init and cdata->init are either zero or one as they have been
	 * initialized or not */
session_error:
	cryptodev_hash_deinit(&ses_new->hdata);
	cryptodev_cipher_deinit(&ses_new->cdata);
	kfree(ses_new->sg);
	kfree(ses_new->pages);
	kfree(ses_new);
	return ret;
}
370 
/* Everything that needs to be done when removing a session.
 * The caller must have already unlinked ses_ptr from its fcrypt list so no
 * new user can reach it; any operation still holding ses_ptr->sem is
 * waited for before teardown. */
static inline void
crypto_destroy_session(struct csession *ses_ptr)
{
	/* trylock first purely so we can log that we had to wait */
	if (!mutex_trylock(&ses_ptr->sem)) {
		ddebug(2, "Waiting for semaphore of sid=0x%08X", ses_ptr->sid);
		mutex_lock(&ses_ptr->sem);
	}
	ddebug(2, "Removed session 0x%08X", ses_ptr->sid);
	cryptodev_cipher_deinit(&ses_ptr->cdata);
	cryptodev_hash_deinit(&ses_ptr->hdata);
	ddebug(2, "freeing space for %d user pages", ses_ptr->array_size);
	kfree(ses_ptr->pages);
	kfree(ses_ptr->sg);
	mutex_unlock(&ses_ptr->sem);
	mutex_destroy(&ses_ptr->sem);
	kfree(ses_ptr);

	/* Non-multithreaded can only create one session */
	if (!rk_cryptodev_multi_thread(NULL)) {
		/* return the single-session token taken at creation */
		atomic_inc(&cryptodev_sess);
		ddebug(2, "Release cryptodev_sess = %d", atomic_read(&cryptodev_sess));
	}
}
395 
396 /* Look up a session by ID and remove. */
397 static int
crypto_finish_session(struct fcrypt * fcr,uint32_t sid)398 crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
399 {
400 	struct csession *tmp, *ses_ptr;
401 	struct list_head *head;
402 	int ret = 0;
403 
404 	mutex_lock(&fcr->sem);
405 	head = &fcr->list;
406 	list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
407 		if (ses_ptr->sid == sid) {
408 			list_del(&ses_ptr->entry);
409 			crypto_destroy_session(ses_ptr);
410 			break;
411 		}
412 	}
413 
414 	if (unlikely(!ses_ptr)) {
415 		derr(1, "Session with sid=0x%08X not found!", sid);
416 		ret = -ENOENT;
417 	}
418 	mutex_unlock(&fcr->sem);
419 
420 	return ret;
421 }
422 
423 /* Remove all sessions when closing the file */
424 static int
crypto_finish_all_sessions(struct fcrypt * fcr)425 crypto_finish_all_sessions(struct fcrypt *fcr)
426 {
427 	struct csession *tmp, *ses_ptr;
428 	struct list_head *head;
429 
430 	mutex_lock(&fcr->sem);
431 
432 	head = &fcr->list;
433 	list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
434 		list_del(&ses_ptr->entry);
435 		crypto_destroy_session(ses_ptr);
436 	}
437 	mutex_unlock(&fcr->sem);
438 
439 	return 0;
440 }
441 
442 /* Look up session by session ID. The returned session is locked. */
443 struct csession *
crypto_get_session_by_sid(struct fcrypt * fcr,uint32_t sid)444 crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
445 {
446 	struct csession *ses_ptr, *retval = NULL;
447 
448 	if (unlikely(fcr == NULL))
449 		return NULL;
450 
451 	mutex_lock(&fcr->sem);
452 	list_for_each_entry(ses_ptr, &fcr->list, entry) {
453 		if (ses_ptr->sid == sid) {
454 			mutex_lock(&ses_ptr->sem);
455 			retval = ses_ptr;
456 			break;
457 		}
458 	}
459 	mutex_unlock(&fcr->sem);
460 
461 	return retval;
462 }
463 
mutex_lock_double(struct mutex * a,struct mutex * b)464 static void mutex_lock_double(struct mutex *a, struct mutex *b)
465 {
466 	if (b < a)
467 		swap(a, b);
468 
469 	mutex_lock(a);
470 	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
471 }
472 
473 int
crypto_get_sessions_by_sid(struct fcrypt * fcr,uint32_t sid_1,struct csession ** ses_ptr_1,uint32_t sid_2,struct csession ** ses_ptr_2)474 crypto_get_sessions_by_sid(struct fcrypt *fcr,
475 			   uint32_t sid_1, struct csession **ses_ptr_1,
476 			   uint32_t sid_2, struct csession **ses_ptr_2)
477 {
478 	struct csession *ses_ptr;
479 	int retval;
480 
481 	if (unlikely(fcr == NULL)) {
482 		retval = -ENOENT;
483 		goto out;
484 	}
485 
486 	if (sid_1 == sid_2) {
487 		retval = -EDEADLK;
488 		goto out;
489 	}
490 
491 	mutex_lock(&fcr->sem);
492 
493 	list_for_each_entry(ses_ptr, &fcr->list, entry) {
494 		if (ses_ptr->sid == sid_1)
495 			*ses_ptr_1 = ses_ptr;
496 		else if (ses_ptr->sid == sid_2)
497 			*ses_ptr_2 = ses_ptr;
498 	}
499 
500 	if (*ses_ptr_1 && *ses_ptr_2) {
501 		mutex_lock_double(&(*ses_ptr_1)->sem, &(*ses_ptr_2)->sem);
502 		retval = 0;
503 	} else {
504 		retval = -ENOENT;
505 	}
506 
507 	mutex_unlock(&fcr->sem);
508 
509 out:
510 	if (retval) {
511 		*ses_ptr_1 = NULL;
512 		*ses_ptr_2 = NULL;
513 	}
514 	return retval;
515 }
516 
#ifdef CIOCCPHASH
/* Copy the hash state from one session to another (CIOCCPHASH ioctl).
 * Locks both sessions, copies src's running hash into dst, and unlocks.
 *
 * Fixes: the error message used the broken specifier "%0x08X" (prints
 * the value as bare hex followed by a literal "8X") instead of
 * "0x%08X", and misspelled "sessions". Locals are also NULL-initialized
 * defensively since the lookup's failure paths are what populate them. */
static int
crypto_copy_hash_state(struct fcrypt *fcr, uint32_t dst_sid, uint32_t src_sid)
{
	struct csession *src_ses = NULL, *dst_ses = NULL;
	int ret;

	ret = crypto_get_sessions_by_sid(fcr, src_sid, &src_ses,
					 dst_sid, &dst_ses);
	if (unlikely(ret)) {
		derr(1, "Failed to get sessions with sid=0x%08X sid=0x%08X!",
		     src_sid, dst_sid);
		return ret;
	}

	ret = cryptodev_hash_copy(&dst_ses->hdata, &src_ses->hdata);
	crypto_put_session(src_ses);
	crypto_put_session(dst_ses);
	return ret;
}
#endif /* CIOCCPHASH */
539 
/* Workqueue handler: drain the todo queue, run each job synchronously,
 * then publish the results on the done queue and wake pollers. */
static void cryptask_routine(struct work_struct *work)
{
	struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
	struct todo_list_item *item;
	LIST_HEAD(tmp);

	/* fetch all pending jobs into the temporary list */
	mutex_lock(&pcr->todo.lock);
	list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
	mutex_unlock(&pcr->todo.lock);

	/* handle each job locklessly */
	list_for_each_entry(item, &tmp, __hook) {
		item->result = crypto_run(&pcr->fcrypt, &item->kcop);
		if (unlikely(item->result))
			derr(0, "crypto_run() failed: %d", item->result);
	}

	/* push all handled jobs to the done list at once */
	mutex_lock(&pcr->done.lock);
	list_splice_tail(&tmp, &pcr->done.list);
	mutex_unlock(&pcr->done.lock);

	/* wake for POLLIN */
	wake_up_interruptible(&pcr->user_waiter);
}
566 
567 /* ====== /dev/crypto ====== */
568 static atomic_t cryptodev_node = ATOMIC_INIT(1);
569 
570 static int
cryptodev_open(struct inode * inode,struct file * filp)571 cryptodev_open(struct inode *inode, struct file *filp)
572 {
573 	struct todo_list_item *tmp, *tmp_next;
574 	struct crypt_priv *pcr;
575 	int i;
576 
577 	/* Non-multithreaded can only be opened once */
578 	if (!rk_cryptodev_multi_thread(NULL) &&
579 	    !atomic_dec_and_test(&cryptodev_node)) {
580 		atomic_inc(&cryptodev_node);
581 		ddebug(2, "Non-multithreaded can only be opened once. node = %d",
582 		       atomic_read(&cryptodev_node));
583 		return -EBUSY;
584 	}
585 
586 	/* make sure sess == 1 after open */
587 	atomic_set(&cryptodev_sess, 1);
588 
589 	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
590 	if (!pcr)
591 		return -ENOMEM;
592 	filp->private_data = pcr;
593 
594 	mutex_init(&pcr->fcrypt.sem);
595 	mutex_init(&pcr->free.lock);
596 	mutex_init(&pcr->todo.lock);
597 	mutex_init(&pcr->done.lock);
598 
599 	INIT_LIST_HEAD(&pcr->fcrypt.list);
600 	INIT_LIST_HEAD(&pcr->fcrypt.dma_map_list);
601 	INIT_LIST_HEAD(&pcr->free.list);
602 	INIT_LIST_HEAD(&pcr->todo.list);
603 	INIT_LIST_HEAD(&pcr->done.list);
604 
605 	INIT_WORK(&pcr->cryptask, cryptask_routine);
606 
607 	init_waitqueue_head(&pcr->user_waiter);
608 
609 	for (i = 0; i < DEF_COP_RINGSIZE; i++) {
610 		tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
611 		if (!tmp)
612 			goto err_ringalloc;
613 		pcr->itemcount++;
614 		ddebug(2, "allocated new item at %p", tmp);
615 		list_add(&tmp->__hook, &pcr->free.list);
616 	}
617 
618 	ddebug(2, "Cryptodev handle initialised, %d elements in queue",
619 			DEF_COP_RINGSIZE);
620 	return 0;
621 
622 /* In case of errors, free any memory allocated so far */
623 err_ringalloc:
624 	list_for_each_entry_safe(tmp, tmp_next, &pcr->free.list, __hook) {
625 		list_del(&tmp->__hook);
626 		kfree(tmp);
627 	}
628 	mutex_destroy(&pcr->done.lock);
629 	mutex_destroy(&pcr->todo.lock);
630 	mutex_destroy(&pcr->free.lock);
631 	mutex_destroy(&pcr->fcrypt.sem);
632 	kfree(pcr);
633 	filp->private_data = NULL;
634 	return -ENOMEM;
635 }
636 
static int
/* Tear down the per-open state: stop the worker, free every ring item,
 * destroy all sessions and release the single-open token (non-MT mode). */
cryptodev_release(struct inode *inode, struct file *filp)
{
	struct crypt_priv *pcr = filp->private_data;
	struct todo_list_item *item, *item_safe;
	int items_freed = 0;

	if (!pcr)
		return 0;

	/* Non-multithreaded can only be opened once */
	if (!rk_cryptodev_multi_thread(NULL)) {
		atomic_inc(&cryptodev_node);
		ddebug(2, "Release cryptodev_node = %d", atomic_read(&cryptodev_node));
	}

	/* after this no worker runs, so the lists can be walked unlocked */
	cancel_work_sync(&pcr->cryptask);

	list_splice_tail(&pcr->todo.list, &pcr->free.list);
	list_splice_tail(&pcr->done.list, &pcr->free.list);

	list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
		ddebug(2, "freeing item at %p", item);
		list_del(&item->__hook);
		kfree(item);
		items_freed++;
	}

	/* sanity check against the allocation counter kept in open/async_run */
	if (items_freed != pcr->itemcount) {
		derr(0, "freed %d items, but %d should exist!",
				items_freed, pcr->itemcount);
	}

	crypto_finish_all_sessions(&pcr->fcrypt);

	mutex_destroy(&pcr->done.lock);
	mutex_destroy(&pcr->todo.lock);
	mutex_destroy(&pcr->free.lock);
	mutex_destroy(&pcr->fcrypt.sem);

	kfree(pcr);
	filp->private_data = NULL;

	ddebug(2, "Cryptodev handle deinitialised, %d elements freed",
			items_freed);
	return 0;
}
684 
685 #ifdef ENABLE_ASYNC
686 /* enqueue a job for asynchronous completion
687  *
688  * returns:
689  * -EBUSY when there are no free queue slots left
690  *        (and the number of slots has reached it MAX_COP_RINGSIZE)
691  * -EFAULT when there was a memory allocation error
692  * 0 on success */
crypto_async_run(struct crypt_priv * pcr,struct kernel_crypt_op * kcop)693 static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
694 {
695 	struct todo_list_item *item = NULL;
696 
697 	if (unlikely(kcop->cop.flags & COP_FLAG_NO_ZC))
698 		return -EINVAL;
699 
700 	mutex_lock(&pcr->free.lock);
701 	if (likely(!list_empty(&pcr->free.list))) {
702 		item = list_first_entry(&pcr->free.list,
703 				struct todo_list_item, __hook);
704 		list_del(&item->__hook);
705 	} else if (pcr->itemcount < MAX_COP_RINGSIZE) {
706 		pcr->itemcount++;
707 	} else {
708 		mutex_unlock(&pcr->free.lock);
709 		return -EBUSY;
710 	}
711 	mutex_unlock(&pcr->free.lock);
712 
713 	if (unlikely(!item)) {
714 		item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
715 		if (unlikely(!item))
716 			return -EFAULT;
717 		dinfo(1, "increased item count to %d", pcr->itemcount);
718 	}
719 
720 	memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));
721 
722 	mutex_lock(&pcr->todo.lock);
723 	list_add_tail(&item->__hook, &pcr->todo.list);
724 	mutex_unlock(&pcr->todo.lock);
725 
726 	queue_work(cryptodev_wq, &pcr->cryptask);
727 	return 0;
728 }
729 
/* get the first completed job from the "done" queue
 *
 * returns:
 * -EBUSY if no completed jobs are ready (yet)
 * the return value of crypto_run() otherwise */
static int crypto_async_fetch(struct crypt_priv *pcr,
		struct kernel_crypt_op *kcop)
{
	struct todo_list_item *item;
	int retval;

	mutex_lock(&pcr->done.lock);
	if (list_empty(&pcr->done.list)) {
		mutex_unlock(&pcr->done.lock);
		return -EBUSY;
	}
	item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
	list_del(&item->__hook);
	mutex_unlock(&pcr->done.lock);

	/* hand the completed op (and its result) back to the caller */
	memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
	retval = item->result;

	/* recycle the item for the next submission */
	mutex_lock(&pcr->free.lock);
	list_add_tail(&item->__hook, &pcr->free.list);
	mutex_unlock(&pcr->free.lock);

	/* wake for POLLOUT */
	wake_up_interruptible(&pcr->user_waiter);

	return retval;
}
762 #endif
763 
/* this function has to be called from process context */
/* Derive the kernel-side fields of kcop (ivlen, task/mm, IV bytes) from
 * the user-supplied crypt_op already stored in kcop->cop. */
static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
{
	struct crypt_op *cop = &kcop->cop;
	struct csession *ses_ptr;
	int rc;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
	if (unlikely(!ses_ptr)) {
		derr(1, "invalid session ID=0x%08X", cop->ses);
		return -EINVAL;
	}
	/* NOTE(review): assumes kcop->iv can hold any cipher's ivsize —
	 * confirm the bound against the kernel_crypt_op definition. */
	kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
	kcop->digestsize = 0; /* will be updated during operation */

	crypto_put_session(ses_ptr);

	/* remember the submitting task/mm for async (zero-copy) handling */
	kcop->task = current;
	kcop->mm = current->mm;

	if (cop->iv) {
		rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
		if (unlikely(rc)) {
			derr(1, "error copying IV (%d bytes), copy_from_user returned %d for address %p",
					kcop->ivlen, rc, cop->iv);
			return -EFAULT;
		}
	}

	return 0;
}
796 
797 /* this function has to be called from process context */
fill_cop_from_kcop(struct kernel_crypt_op * kcop,struct fcrypt * fcr)798 static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
799 {
800 	int ret;
801 
802 	if (kcop->digestsize) {
803 		ret = copy_to_user(kcop->cop.mac,
804 				kcop->hash_output, kcop->digestsize);
805 		if (unlikely(ret))
806 			return -EFAULT;
807 	}
808 	if (kcop->ivlen && kcop->cop.flags & COP_FLAG_WRITE_IV) {
809 		ret = copy_to_user(kcop->cop.iv,
810 				kcop->iv, kcop->ivlen);
811 		if (unlikely(ret))
812 			return -EFAULT;
813 	}
814 	return 0;
815 }
816 
/* Read a crypt_op from user space and finish building the kernel op. */
static int kcop_from_user(struct kernel_crypt_op *kcop,
			struct fcrypt *fcr, void __user *arg)
{
	if (copy_from_user(&kcop->cop, arg, sizeof(kcop->cop)) != 0)
		return -EFAULT;

	return fill_kcop_from_cop(kcop, fcr);
}
825 
/* Push results into the user buffers, then write back the crypt_op. */
static int kcop_to_user(struct kernel_crypt_op *kcop,
			struct fcrypt *fcr, void __user *arg)
{
	int rc = fill_cop_from_kcop(kcop, fcr);

	if (unlikely(rc)) {
		derr(1, "Error in fill_cop_from_kcop");
		return rc;
	}

	if (unlikely(copy_to_user(arg, &kcop->cop, sizeof(kcop->cop)))) {
		derr(1, "Cannot copy to userspace");
		return -EFAULT;
	}

	return 0;
}
843 
tfm_info_to_alg_info(struct alg_info * dst,struct crypto_tfm * tfm)844 static inline void tfm_info_to_alg_info(struct alg_info *dst, struct crypto_tfm *tfm)
845 {
846 	snprintf(dst->cra_name, CRYPTODEV_MAX_ALG_NAME,
847 			"%s", crypto_tfm_alg_name(tfm));
848 	snprintf(dst->cra_driver_name, CRYPTODEV_MAX_ALG_NAME,
849 			"%s", crypto_tfm_alg_driver_name(tfm));
850 }
851 
#ifndef CRYPTO_ALG_KERN_DRIVER_ONLY
/* Heuristic for kernels without CRYPTO_ALG_KERN_DRIVER_ONLY: decide
 * from the driver name whether the transform is hardware-accelerated. */
static unsigned int is_known_accelerated(struct crypto_tfm *tfm)
{
	/* engine markers that may appear anywhere in the driver name */
	static const char * const engine_substr[] = {
		"-talitos", "geode", "hifn", "-ixp4xx", "-omap",
		"-picoxcell", "-s5p", "-ppc4xx", "-caam", "-n2",
	};
	/* engine markers that must prefix the driver name */
	static const char * const engine_prefix[] = { "mv-", "atmel-" };
	const char *name = crypto_tfm_alg_driver_name(tfm);
	size_t i;

	if (name == NULL)
		return 1; /* assume accelerated */

	for (i = 0; i < ARRAY_SIZE(engine_substr); i++)
		if (strstr(name, engine_substr[i]))
			return 1;

	for (i = 0; i < ARRAY_SIZE(engine_prefix); i++)
		if (!strncmp(name, engine_prefix[i], strlen(engine_prefix[i])))
			return 1;

	return 0;
}
#endif
878 
/* CIOCGSESSINFO backend: report the cipher/hash algorithm + driver
 * names, an accelerated-driver flag and the alignment mask for a
 * session. Returns 0 or -EINVAL for an unknown session ID. */
static int get_session_info(struct fcrypt *fcr, struct session_info_op *siop)
{
	struct csession *ses_ptr;
	struct crypto_tfm *tfm;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, siop->ses);
	if (unlikely(!ses_ptr)) {
		derr(1, "invalid session ID=0x%08X", siop->ses);
		return -EINVAL;
	}

	siop->flags = 0;

	if (ses_ptr->cdata.init) {
		/* AEAD sessions keep their tfm behind the aead handle */
		if (ses_ptr->cdata.aead == 0)
			tfm = cryptodev_crypto_blkcipher_tfm(ses_ptr->cdata.async.s);
		else
			tfm = crypto_aead_tfm(ses_ptr->cdata.async.as);
		tfm_info_to_alg_info(&siop->cipher_info, tfm);
#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
		if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#else
		/* older kernels: fall back to the driver-name heuristic */
		if (is_known_accelerated(tfm))
			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#endif
	}
	if (ses_ptr->hdata.init) {
		tfm = crypto_ahash_tfm(ses_ptr->hdata.async.s);
		tfm_info_to_alg_info(&siop->hash_info, tfm);
#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
		if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#else
		if (is_known_accelerated(tfm))
			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#endif
	}

	siop->alignmask = ses_ptr->alignmask;

	crypto_put_session(ses_ptr);
	return 0;
}
924 
static long
/* Main ioctl dispatcher for /dev/crypto; unknown commands are handed to
 * the Rockchip-specific handler. */
cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
{
	void __user *arg = (void __user *)arg_;
	int __user *p = arg;
	struct session_op sop;
	struct kernel_crypt_op kcop;
	struct kernel_crypt_auth_op kcaop;
	struct crypt_priv *pcr = filp->private_data;
	struct fcrypt *fcr;
	struct session_info_op siop;
#ifdef CIOCCPHASH
	struct cphash_op cphop;
#endif
	uint32_t ses;
	int ret, fd;

	if (unlikely(!pcr))
		BUG();

	fcr = &pcr->fcrypt;

	switch (cmd) {
	case CIOCASYMFEAT:
		/* no asymmetric operations supported: empty feature mask */
		return put_user(0, p);
	case CRIOGET:
		/* hand out an extra fd referring to this same open file */
		fd = get_unused_fd_flags(0);
		if (unlikely(fd < 0))
			return fd;

		ret = put_user(fd, p);
		if (unlikely(ret)) {
			put_unused_fd(fd);
			return ret;
		}

		get_file(filp);
		fd_install(fd, filp);

		return ret;
	case CIOCGSESSION:
		/* create a session and return its sid in sop.ses */
		if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
			return -EFAULT;

		ret = crypto_create_session(fcr, &sop);
		if (unlikely(ret))
			return ret;
		ret = copy_to_user(arg, &sop, sizeof(sop));
		if (unlikely(ret)) {
			/* can't report the sid back: undo the session */
			crypto_finish_session(fcr, sop.ses);
			return -EFAULT;
		}
		return ret;
	case CIOCFSESSION:
		ret = get_user(ses, (uint32_t __user *)arg);
		if (unlikely(ret))
			return ret;
		ret = crypto_finish_session(fcr, ses);
		return ret;
	case CIOCGSESSINFO:
		if (unlikely(copy_from_user(&siop, arg, sizeof(siop))))
			return -EFAULT;

		ret = get_session_info(fcr, &siop);
		if (unlikely(ret))
			return ret;
		/* NOTE(review): returns copy_to_user()'s raw (positive)
		 * byte count on fault rather than -EFAULT — confirm
		 * whether userspace tolerates this. */
		return copy_to_user(arg, &siop, sizeof(siop));
#ifdef CIOCCPHASH
	case CIOCCPHASH:
		if (unlikely(copy_from_user(&cphop, arg, sizeof(cphop))))
			return -EFAULT;
		return crypto_copy_hash_state(fcr, cphop.dst_ses, cphop.src_ses);
#endif /* CIOCPHASH */
	case CIOCCRYPT:
		/* synchronous encrypt/decrypt/hash */
		if (unlikely(ret = kcop_from_user(&kcop, fcr, arg))) {
			dwarning(1, "Error copying from user");
			return ret;
		}

		ret = crypto_run(fcr, &kcop);
		if (unlikely(ret)) {
			dwarning(1, "Error in crypto_run");
			return ret;
		}

		return kcop_to_user(&kcop, fcr, arg);
	case CIOCAUTHCRYPT:
		/* synchronous AEAD/TLS combined operation */
		if (unlikely(ret = cryptodev_kcaop_from_user(&kcaop, fcr, arg))) {
			dwarning(1, "Error copying from user");
			return ret;
		}

		ret = crypto_auth_run(fcr, &kcaop);
		if (unlikely(ret)) {
			dwarning(1, "Error in crypto_auth_run");
			return ret;
		}
		return cryptodev_kcaop_to_user(&kcaop, fcr, arg);
#ifdef ENABLE_ASYNC
	case CIOCASYNCCRYPT:
		/* queue the op; completion is picked up via CIOCASYNCFETCH */
		if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
			return ret;

		return crypto_async_run(pcr, &kcop);
	case CIOCASYNCFETCH:
		ret = crypto_async_fetch(pcr, &kcop);
		if (unlikely(ret))
			return ret;

		return kcop_to_user(&kcop, fcr, arg);
#endif
	default:
		return rk_cryptodev_ioctl(fcr, cmd, arg_);
	}
}
1040 
1041 /* compatibility code for 32bit userlands */
1042 #ifdef CONFIG_COMPAT
1043 
1044 static inline void
compat_to_session_op(struct compat_session_op * compat,struct session_op * sop)1045 compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
1046 {
1047 	sop->cipher = compat->cipher;
1048 	sop->mac = compat->mac;
1049 	sop->keylen = compat->keylen;
1050 
1051 	sop->key       = compat_ptr(compat->key);
1052 	sop->mackeylen = compat->mackeylen;
1053 	sop->mackey    = compat_ptr(compat->mackey);
1054 	sop->ses       = compat->ses;
1055 }
1056 
1057 static inline void
session_op_to_compat(struct session_op * sop,struct compat_session_op * compat)1058 session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
1059 {
1060 	compat->cipher = sop->cipher;
1061 	compat->mac = sop->mac;
1062 	compat->keylen = sop->keylen;
1063 
1064 	compat->key       = ptr_to_compat(sop->key);
1065 	compat->mackeylen = sop->mackeylen;
1066 	compat->mackey    = ptr_to_compat(sop->mackey);
1067 	compat->ses       = sop->ses;
1068 }
1069 
1070 static inline void
compat_to_crypt_op(struct compat_crypt_op * compat,struct crypt_op * cop)1071 compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
1072 {
1073 	cop->ses = compat->ses;
1074 	cop->op = compat->op;
1075 	cop->flags = compat->flags;
1076 	cop->len = compat->len;
1077 
1078 	cop->src = compat_ptr(compat->src);
1079 	cop->dst = compat_ptr(compat->dst);
1080 	cop->mac = compat_ptr(compat->mac);
1081 	cop->iv  = compat_ptr(compat->iv);
1082 }
1083 
1084 static inline void
crypt_op_to_compat(struct crypt_op * cop,struct compat_crypt_op * compat)1085 crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
1086 {
1087 	compat->ses = cop->ses;
1088 	compat->op = cop->op;
1089 	compat->flags = cop->flags;
1090 	compat->len = cop->len;
1091 
1092 	compat->src = ptr_to_compat(cop->src);
1093 	compat->dst = ptr_to_compat(cop->dst);
1094 	compat->mac = ptr_to_compat(cop->mac);
1095 	compat->iv  = ptr_to_compat(cop->iv);
1096 }
1097 
/*
 * Fetch a compat (32-bit) crypt_op from userland and build the kernel-side
 * kernel_crypt_op from it.  Returns 0 on success, -EFAULT on a bad user
 * pointer, or the error from fill_kcop_from_cop().
 */
static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
                                 struct fcrypt *fcr, void __user *arg)
{
	struct compat_crypt_op compat_cop;

	if (unlikely(copy_from_user(&compat_cop, arg, sizeof(compat_cop)) != 0))
		return -EFAULT;

	/* Widen the 32-bit request, then finish kernel-side setup. */
	compat_to_crypt_op(&compat_cop, &kcop->cop);
	return fill_kcop_from_cop(kcop, fcr);
}
1109 
/*
 * Convert a finished kernel_crypt_op back to the compat (32-bit) layout and
 * copy it out to userland.  Returns 0 on success or a negative errno.
 */
static int compat_kcop_to_user(struct kernel_crypt_op *kcop,
                               struct fcrypt *fcr, void __user *arg)
{
	struct compat_crypt_op compat_cop;
	int rc;

	rc = fill_cop_from_kcop(kcop, fcr);
	if (unlikely(rc)) {
		dwarning(1, "Error in fill_cop_from_kcop");
		return rc;
	}

	/* Narrow to the 32-bit layout before the copy-out. */
	crypt_op_to_compat(&kcop->cop, &compat_cop);
	if (unlikely(copy_to_user(arg, &compat_cop, sizeof(compat_cop)) != 0)) {
		dwarning(1, "Error copying to user");
		return -EFAULT;
	}

	return 0;
}
1129 
/*
 * ioctl entry point for 32-bit userland on a 64-bit kernel.  Commands whose
 * argument layout is identical in both ABIs are forwarded straight to
 * cryptodev_ioctl(); the rest are translated through the compat_* structures
 * above.  Returns 0 on success or a negative errno.
 */
static long
cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
{
	void __user *arg = (void __user *)arg_;
	struct crypt_priv *pcr = file->private_data;
	struct fcrypt *fcr;
	struct session_op sop;
	struct compat_session_op compat_sop;
	struct kernel_crypt_op kcop;
	struct kernel_crypt_auth_op kcaop;
	int ret;

	/* private_data is set at open(); a NULL here is a driver bug. */
	if (unlikely(!pcr))
		BUG();

	fcr = &pcr->fcrypt;

	switch (cmd) {
	/* These commands take no pointer-bearing struct, so the native
	 * handler works unchanged for 32-bit callers. */
	case CIOCASYMFEAT:
	case CRIOGET:
	case CIOCFSESSION:
	case CIOCGSESSINFO:
		return cryptodev_ioctl(file, cmd, arg_);

	case COMPAT_CIOCGSESSION:
		if (unlikely(copy_from_user(&compat_sop, arg,
					    sizeof(compat_sop))))
			return -EFAULT;
		compat_to_session_op(&compat_sop, &sop);

		ret = crypto_create_session(fcr, &sop);
		if (unlikely(ret))
			return ret;

		session_op_to_compat(&sop, &compat_sop);
		ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
		if (unlikely(ret)) {
			/* Can't report the session id; tear it down again. */
			crypto_finish_session(fcr, sop.ses);
			return -EFAULT;
		}
		/* ret is 0 here (copy_to_user copied everything). */
		return ret;

	case COMPAT_CIOCCRYPT:
		ret = compat_kcop_from_user(&kcop, fcr, arg);
		if (unlikely(ret))
			return ret;

		ret = crypto_run(fcr, &kcop);
		if (unlikely(ret))
			return ret;

		return compat_kcop_to_user(&kcop, fcr, arg);

	case COMPAT_CIOCAUTHCRYPT:
		ret = compat_kcaop_from_user(&kcaop, fcr, arg);
		if (unlikely(ret)) {
			dwarning(1, "Error copying from user");
			return ret;
		}

		ret = crypto_auth_run(fcr, &kcaop);
		if (unlikely(ret)) {
			dwarning(1, "Error in crypto_auth_run");
			return ret;
		}
		return compat_kcaop_to_user(&kcaop, fcr, arg);
#ifdef ENABLE_ASYNC
	case COMPAT_CIOCASYNCCRYPT:
		if (unlikely(ret = compat_kcop_from_user(&kcop, fcr, arg)))
			return ret;

		return crypto_async_run(pcr, &kcop);
	case COMPAT_CIOCASYNCFETCH:
		ret = crypto_async_fetch(pcr, &kcop);
		if (unlikely(ret))
			return ret;

		return compat_kcop_to_user(&kcop, fcr, arg);
#endif
	default:
		/* Unknown here: hand off to the Rockchip extension ioctls. */
		return rk_compat_cryptodev_ioctl(fcr, cmd, arg_);
	}
}
1213 
1214 #endif /* CONFIG_COMPAT */
1215 
/*
 * poll() support for the async interface: readable when finished jobs are
 * queued, writable while the ring can accept another request.
 */
static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
{
	struct crypt_priv *pcr = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &pcr->user_waiter, wait);

	/* Completed operations are ready to be fetched. */
	if (!list_empty_careful(&pcr->done.list))
		mask |= POLLIN | POLLRDNORM;

	/* Either a recycled item is free or the ring still has headroom. */
	if (!list_empty_careful(&pcr->free.list) ||
	    pcr->itemcount < MAX_COP_RINGSIZE)
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
1230 
/* File operations for /dev/crypto. */
static const struct file_operations cryptodev_fops = {
	.owner = THIS_MODULE,
	.open = cryptodev_open,
	.release = cryptodev_release,
	.unlocked_ioctl = cryptodev_ioctl,
#ifdef CONFIG_COMPAT
	/* 32-bit userland on a 64-bit kernel. */
	.compat_ioctl = cryptodev_compat_ioctl,
#endif /* CONFIG_COMPAT */
	.poll = cryptodev_poll,
};
1241 
/* Misc-device registration for /dev/crypto (dynamic minor). */
static struct miscdevice cryptodev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "crypto",
	.fops = &cryptodev_fops,
	/* 0666: deliberately world read/write so unprivileged processes can
	 * use the crypto API; access control happens per-session. */
	.mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH,
};
1248 
1249 static int __init
cryptodev_register(void)1250 cryptodev_register(void)
1251 {
1252 	int rc;
1253 
1254 	rc = misc_register(&cryptodev);
1255 	if (unlikely(rc)) {
1256 		pr_err(PFX "registration of /dev/crypto failed\n");
1257 		return rc;
1258 	}
1259 
1260 	return 0;
1261 }
1262 
/* Tear down the /dev/crypto misc device. */
static void __exit
cryptodev_deregister(void)
{
	misc_deregister(&cryptodev);
}
1268 
1269 /* ====== Module init/exit ====== */
/* ====== Module init/exit ====== */
/* sysctl leaf: ioctl/cryptodev_verbosity, an int controlling debug output. */
static struct ctl_table verbosity_ctl_dir[] = {
	{
		.procname       = "cryptodev_verbosity",
		.data           = &cryptodev_verbosity,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec,
	},
	{},
};
1280 
/* sysctl parent directory ("ioctl") holding the verbosity knob above. */
static struct ctl_table verbosity_ctl_root[] = {
	{
		.procname       = "ioctl",
		.mode           = 0555,
		.child          = verbosity_ctl_dir,
	},
	{},
};
/* Handle returned by register_sysctl_table(); NULL if registration failed. */
static struct ctl_table_header *verbosity_sysctl_header;
init_cryptodev(void)1290 static int __init init_cryptodev(void)
1291 {
1292 	int rc;
1293 
1294 	cryptodev_wq = create_workqueue("cryptodev_queue");
1295 	if (unlikely(!cryptodev_wq)) {
1296 		pr_err(PFX "failed to allocate the cryptodev workqueue\n");
1297 		return -EFAULT;
1298 	}
1299 
1300 	rc = cryptodev_register();
1301 	if (unlikely(rc)) {
1302 		destroy_workqueue(cryptodev_wq);
1303 		return rc;
1304 	}
1305 
1306 	verbosity_sysctl_header = register_sysctl_table(verbosity_ctl_root);
1307 
1308 	pr_info(PFX "driver %s loaded.\n", VERSION);
1309 
1310 	return 0;
1311 }
1312 
/*
 * Module exit: drain and destroy the workqueue before removing the device
 * so no async work can touch freed state.
 */
static void __exit exit_cryptodev(void)
{
	/* NOTE(review): destroy_workqueue() already flushes; the explicit
	 * flush_workqueue() beforehand is redundant but harmless. */
	flush_workqueue(cryptodev_wq);
	destroy_workqueue(cryptodev_wq);

	/* Header is NULL if sysctl registration failed during init. */
	if (verbosity_sysctl_header)
		unregister_sysctl_table(verbosity_sysctl_header);

	cryptodev_deregister();
	pr_info(PFX "driver unloaded.\n");
}
1324 
/* Wire the init/exit handlers into the module lifecycle. */
module_init(init_cryptodev);
module_exit(exit_cryptodev);
1327 
1328