1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Driver for /dev/crypto device (aka CryptoDev)
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright (c) 2004 Michal Ludvig <mludvig@logix.net.nz>, SuSE Labs
5*4882a593Smuzhiyun * Copyright (c) 2009,2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
6*4882a593Smuzhiyun * Copyright (c) 2010 Phil Sutter
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * This file is part of linux cryptodev.
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or
11*4882a593Smuzhiyun * modify it under the terms of the GNU General Public License
12*4882a593Smuzhiyun * as published by the Free Software Foundation; either version 2
13*4882a593Smuzhiyun * of the License, or (at your option) any later version.
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * This program is distributed in the hope that it will be useful,
16*4882a593Smuzhiyun * but WITHOUT ANY WARRANTY; without even the implied warranty of
17*4882a593Smuzhiyun * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18*4882a593Smuzhiyun * GNU General Public License for more details.
19*4882a593Smuzhiyun *
20*4882a593Smuzhiyun * You should have received a copy of the GNU General Public License
21*4882a593Smuzhiyun * along with this program; if not, write to the Free Software
22*4882a593Smuzhiyun * Foundation, Inc.,
23*4882a593Smuzhiyun * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
24*4882a593Smuzhiyun */
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun /*
27*4882a593Smuzhiyun * Device /dev/crypto provides an interface for
28*4882a593Smuzhiyun * accessing kernel CryptoAPI algorithms (ciphers,
29*4882a593Smuzhiyun * hashes) from userspace programs.
30*4882a593Smuzhiyun *
31*4882a593Smuzhiyun * /dev/crypto interface was originally introduced in
32*4882a593Smuzhiyun * OpenBSD and this module attempts to keep the API.
33*4882a593Smuzhiyun *
34*4882a593Smuzhiyun */
35*4882a593Smuzhiyun
#include <crypto/hash.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/scatterlist.h>
#include <linux/rtnetlink.h>
#include <crypto/authenc.h>

#include <linux/sysctl.h>

#include "cryptodev.h"
#include "zc.h"
#include "version.h"
#include "cipherapi.h"

#include "rk_cryptodev.h"
57*4882a593Smuzhiyun
MODULE_AUTHOR("Nikos Mavrogiannopoulos <nmav@gnutls.org>");
MODULE_DESCRIPTION("CryptoDev driver");
MODULE_LICENSE("GPL");

/* ====== Compile-time config ====== */

/* Default (pre-allocated) and maximum size of the job queue.
 * These are free, pending and done items all together. */
#define DEF_COP_RINGSIZE 16
#define MAX_COP_RINGSIZE 64

/* ====== Module parameters ====== */

/* Runtime log verbosity consumed by the ddebug()/derr() macros. */
int cryptodev_verbosity;
module_param(cryptodev_verbosity, int, 0644);
MODULE_PARM_DESC(cryptodev_verbosity, "0: normal, 1: verbose, 2: debug");
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun /* ====== CryptoAPI ====== */
/* One asynchronous crypto job.  Items circulate between the free, todo
 * and done lists of a struct crypt_priv; 'result' holds the crypto_run()
 * return code once the workqueue has processed the job. */
struct todo_list_item {
	struct list_head __hook;	/* linkage into free/todo/done lists */
	struct kernel_crypt_op kcop;	/* the queued operation, stored by value */
	int result;			/* crypto_run() return code */
};
81*4882a593Smuzhiyun
/* A list head paired with the mutex that guards it. */
struct locked_list {
	struct list_head list;
	struct mutex lock;
};
86*4882a593Smuzhiyun
/* Per-open-file private state for /dev/crypto. */
struct crypt_priv {
	struct fcrypt fcrypt;			/* session list of this fd */
	struct locked_list free, todo, done;	/* async job ring stages */
	int itemcount;				/* total items across all three lists */
	struct work_struct cryptask;		/* worker that runs queued jobs */
	wait_queue_head_t user_waiter;		/* poll()/read wakeup for done jobs */
};
94*4882a593Smuzhiyun
/* Fill one scatterlist entry from a kernel virtual address.
 * NOTE(review): writes sg->page and sg->dma_address directly, fields the
 * modern scatterlist layout no longer exposes; the macro appears unused
 * in this file -- confirm before relying on it. */
#define FILL_SG(sg, ptr, len) \
	do { \
		(sg)->page = virt_to_page(ptr); \
		(sg)->offset = offset_in_page(ptr); \
		(sg)->length = len; \
		(sg)->dma_address = 0; \
	} while (0)
102*4882a593Smuzhiyun
/* cryptodev's own workqueue, keeps crypto tasks from disturbing the force */
static struct workqueue_struct *cryptodev_wq;
/* Single-session slot for non-multithreaded hardware: starts at 1, taken
 * in crypto_create_session(), released in crypto_destroy_session(). */
static atomic_t cryptodev_sess = ATOMIC_INIT(1);
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun /* Prepare session for future use. */
108*4882a593Smuzhiyun static int
crypto_create_session(struct fcrypt * fcr,struct session_op * sop)109*4882a593Smuzhiyun crypto_create_session(struct fcrypt *fcr, struct session_op *sop)
110*4882a593Smuzhiyun {
111*4882a593Smuzhiyun struct csession *ses_new = NULL, *ses_ptr;
112*4882a593Smuzhiyun int ret = 0;
113*4882a593Smuzhiyun const char *alg_name = NULL;
114*4882a593Smuzhiyun const char *hash_name = NULL;
115*4882a593Smuzhiyun int hmac_mode = 1, stream = 0, aead = 0;
116*4882a593Smuzhiyun /*
117*4882a593Smuzhiyun * With composite aead ciphers, only ckey is used and it can cover all the
118*4882a593Smuzhiyun * structure space; otherwise both keys may be used simultaneously but they
119*4882a593Smuzhiyun * are confined to their spaces
120*4882a593Smuzhiyun */
121*4882a593Smuzhiyun struct {
122*4882a593Smuzhiyun uint8_t ckey[CRYPTO_CIPHER_MAX_KEY_LEN];
123*4882a593Smuzhiyun uint8_t mkey[CRYPTO_HMAC_MAX_KEY_LEN];
124*4882a593Smuzhiyun /* padding space for aead keys */
125*4882a593Smuzhiyun uint8_t pad[RTA_SPACE(sizeof(struct crypto_authenc_key_param))];
126*4882a593Smuzhiyun } keys;
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun /* Does the request make sense? */
129*4882a593Smuzhiyun if (unlikely(!sop->cipher && !sop->mac)) {
130*4882a593Smuzhiyun ddebug(1, "Both 'cipher' and 'mac' unset.");
131*4882a593Smuzhiyun return -EINVAL;
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun switch (sop->cipher) {
135*4882a593Smuzhiyun case 0:
136*4882a593Smuzhiyun break;
137*4882a593Smuzhiyun case CRYPTO_DES_CBC:
138*4882a593Smuzhiyun alg_name = "cbc(des)";
139*4882a593Smuzhiyun break;
140*4882a593Smuzhiyun case CRYPTO_3DES_CBC:
141*4882a593Smuzhiyun alg_name = "cbc(des3_ede)";
142*4882a593Smuzhiyun break;
143*4882a593Smuzhiyun case CRYPTO_BLF_CBC:
144*4882a593Smuzhiyun alg_name = "cbc(blowfish)";
145*4882a593Smuzhiyun break;
146*4882a593Smuzhiyun case CRYPTO_AES_CBC:
147*4882a593Smuzhiyun alg_name = "cbc(aes)";
148*4882a593Smuzhiyun break;
149*4882a593Smuzhiyun case CRYPTO_AES_ECB:
150*4882a593Smuzhiyun alg_name = "ecb(aes)";
151*4882a593Smuzhiyun break;
152*4882a593Smuzhiyun case CRYPTO_AES_XTS:
153*4882a593Smuzhiyun alg_name = "xts(aes)";
154*4882a593Smuzhiyun break;
155*4882a593Smuzhiyun case CRYPTO_CAMELLIA_CBC:
156*4882a593Smuzhiyun alg_name = "cbc(camellia)";
157*4882a593Smuzhiyun break;
158*4882a593Smuzhiyun case CRYPTO_AES_CTR:
159*4882a593Smuzhiyun alg_name = "ctr(aes)";
160*4882a593Smuzhiyun stream = 1;
161*4882a593Smuzhiyun break;
162*4882a593Smuzhiyun case CRYPTO_AES_GCM:
163*4882a593Smuzhiyun alg_name = "gcm(aes)";
164*4882a593Smuzhiyun stream = 1;
165*4882a593Smuzhiyun aead = 1;
166*4882a593Smuzhiyun break;
167*4882a593Smuzhiyun case CRYPTO_TLS11_AES_CBC_HMAC_SHA1:
168*4882a593Smuzhiyun alg_name = "tls11(hmac(sha1),cbc(aes))";
169*4882a593Smuzhiyun stream = 0;
170*4882a593Smuzhiyun aead = 1;
171*4882a593Smuzhiyun break;
172*4882a593Smuzhiyun case CRYPTO_TLS12_AES_CBC_HMAC_SHA256:
173*4882a593Smuzhiyun alg_name = "tls12(hmac(sha256),cbc(aes))";
174*4882a593Smuzhiyun stream = 0;
175*4882a593Smuzhiyun aead = 1;
176*4882a593Smuzhiyun break;
177*4882a593Smuzhiyun case CRYPTO_NULL:
178*4882a593Smuzhiyun alg_name = "ecb(cipher_null)";
179*4882a593Smuzhiyun stream = 1;
180*4882a593Smuzhiyun break;
181*4882a593Smuzhiyun default:
182*4882a593Smuzhiyun alg_name = rk_get_cipher_name(sop->cipher, &stream, &aead);
183*4882a593Smuzhiyun if (!alg_name) {
184*4882a593Smuzhiyun ddebug(1, "bad cipher: %d", sop->cipher);
185*4882a593Smuzhiyun return -EINVAL;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun break;
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun switch (sop->mac) {
191*4882a593Smuzhiyun case 0:
192*4882a593Smuzhiyun break;
193*4882a593Smuzhiyun case CRYPTO_MD5_HMAC:
194*4882a593Smuzhiyun hash_name = "hmac(md5)";
195*4882a593Smuzhiyun break;
196*4882a593Smuzhiyun case CRYPTO_RIPEMD160_HMAC:
197*4882a593Smuzhiyun hash_name = "hmac(rmd160)";
198*4882a593Smuzhiyun break;
199*4882a593Smuzhiyun case CRYPTO_SHA1_HMAC:
200*4882a593Smuzhiyun hash_name = "hmac(sha1)";
201*4882a593Smuzhiyun break;
202*4882a593Smuzhiyun case CRYPTO_SHA2_224_HMAC:
203*4882a593Smuzhiyun hash_name = "hmac(sha224)";
204*4882a593Smuzhiyun break;
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun case CRYPTO_SHA2_256_HMAC:
207*4882a593Smuzhiyun hash_name = "hmac(sha256)";
208*4882a593Smuzhiyun break;
209*4882a593Smuzhiyun case CRYPTO_SHA2_384_HMAC:
210*4882a593Smuzhiyun hash_name = "hmac(sha384)";
211*4882a593Smuzhiyun break;
212*4882a593Smuzhiyun case CRYPTO_SHA2_512_HMAC:
213*4882a593Smuzhiyun hash_name = "hmac(sha512)";
214*4882a593Smuzhiyun break;
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun /* non-hmac cases */
217*4882a593Smuzhiyun case CRYPTO_MD5:
218*4882a593Smuzhiyun hash_name = "md5";
219*4882a593Smuzhiyun hmac_mode = 0;
220*4882a593Smuzhiyun break;
221*4882a593Smuzhiyun case CRYPTO_RIPEMD160:
222*4882a593Smuzhiyun hash_name = "rmd160";
223*4882a593Smuzhiyun hmac_mode = 0;
224*4882a593Smuzhiyun break;
225*4882a593Smuzhiyun case CRYPTO_SHA1:
226*4882a593Smuzhiyun hash_name = "sha1";
227*4882a593Smuzhiyun hmac_mode = 0;
228*4882a593Smuzhiyun break;
229*4882a593Smuzhiyun case CRYPTO_SHA2_224:
230*4882a593Smuzhiyun hash_name = "sha224";
231*4882a593Smuzhiyun hmac_mode = 0;
232*4882a593Smuzhiyun break;
233*4882a593Smuzhiyun case CRYPTO_SHA2_256:
234*4882a593Smuzhiyun hash_name = "sha256";
235*4882a593Smuzhiyun hmac_mode = 0;
236*4882a593Smuzhiyun break;
237*4882a593Smuzhiyun case CRYPTO_SHA2_384:
238*4882a593Smuzhiyun hash_name = "sha384";
239*4882a593Smuzhiyun hmac_mode = 0;
240*4882a593Smuzhiyun break;
241*4882a593Smuzhiyun case CRYPTO_SHA2_512:
242*4882a593Smuzhiyun hash_name = "sha512";
243*4882a593Smuzhiyun hmac_mode = 0;
244*4882a593Smuzhiyun break;
245*4882a593Smuzhiyun default:
246*4882a593Smuzhiyun hash_name = rk_get_hash_name(sop->mac, &hmac_mode);
247*4882a593Smuzhiyun if (!hash_name) {
248*4882a593Smuzhiyun ddebug(1, "bad mac: %d", sop->mac);
249*4882a593Smuzhiyun return -EINVAL;
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun break;
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun /* Create a session and put it to the list. Zeroing the structure helps
255*4882a593Smuzhiyun * also with a single exit point in case of errors */
256*4882a593Smuzhiyun ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
257*4882a593Smuzhiyun if (!ses_new)
258*4882a593Smuzhiyun return -ENOMEM;
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun /* Set-up crypto transform. */
261*4882a593Smuzhiyun if (alg_name) {
262*4882a593Smuzhiyun unsigned int keylen;
263*4882a593Smuzhiyun ret = cryptodev_get_cipher_keylen(&keylen, sop, aead);
264*4882a593Smuzhiyun if (unlikely(ret < 0)) {
265*4882a593Smuzhiyun ddebug(1, "Setting key failed for %s-%zu.",
266*4882a593Smuzhiyun alg_name, (size_t)sop->keylen*8);
267*4882a593Smuzhiyun goto session_error;
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun ret = cryptodev_get_cipher_key(keys.ckey, sop, aead);
271*4882a593Smuzhiyun if (unlikely(ret < 0))
272*4882a593Smuzhiyun goto session_error;
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun ret = cryptodev_cipher_init(&ses_new->cdata, alg_name, keys.ckey,
275*4882a593Smuzhiyun keylen, stream, aead);
276*4882a593Smuzhiyun if (ret < 0) {
277*4882a593Smuzhiyun ddebug(1, "Failed to load cipher for %s", alg_name);
278*4882a593Smuzhiyun goto session_error;
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun if (hash_name && aead == 0) {
283*4882a593Smuzhiyun if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN)) {
284*4882a593Smuzhiyun ddebug(1, "Setting key failed for %s-%zu.",
285*4882a593Smuzhiyun hash_name, (size_t)sop->mackeylen*8);
286*4882a593Smuzhiyun ret = -EINVAL;
287*4882a593Smuzhiyun goto session_error;
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun if (sop->mackey && unlikely(copy_from_user(keys.mkey, sop->mackey,
291*4882a593Smuzhiyun sop->mackeylen))) {
292*4882a593Smuzhiyun ret = -EFAULT;
293*4882a593Smuzhiyun goto session_error;
294*4882a593Smuzhiyun }
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun ret = cryptodev_hash_init(&ses_new->hdata, hash_name, hmac_mode,
297*4882a593Smuzhiyun keys.mkey, sop->mackeylen);
298*4882a593Smuzhiyun if (ret != 0) {
299*4882a593Smuzhiyun ddebug(1, "Failed to load hash for %s", hash_name);
300*4882a593Smuzhiyun goto session_error;
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun ret = cryptodev_hash_reset(&ses_new->hdata);
304*4882a593Smuzhiyun if (ret != 0) {
305*4882a593Smuzhiyun goto session_error;
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun }
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun ses_new->alignmask = max(ses_new->cdata.alignmask,
310*4882a593Smuzhiyun ses_new->hdata.alignmask);
311*4882a593Smuzhiyun ddebug(2, "got alignmask %d", ses_new->alignmask);
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun ses_new->array_size = DEFAULT_PREALLOC_PAGES;
314*4882a593Smuzhiyun ddebug(2, "preallocating for %d user pages", ses_new->array_size);
315*4882a593Smuzhiyun ses_new->pages = kzalloc(ses_new->array_size *
316*4882a593Smuzhiyun sizeof(struct page *), GFP_KERNEL);
317*4882a593Smuzhiyun ses_new->sg = kzalloc(ses_new->array_size *
318*4882a593Smuzhiyun sizeof(struct scatterlist), GFP_KERNEL);
319*4882a593Smuzhiyun if (ses_new->sg == NULL || ses_new->pages == NULL) {
320*4882a593Smuzhiyun ddebug(0, "Memory error");
321*4882a593Smuzhiyun ret = -ENOMEM;
322*4882a593Smuzhiyun goto session_error;
323*4882a593Smuzhiyun }
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun /* Non-multithreaded can only create one session */
326*4882a593Smuzhiyun if (!rk_cryptodev_multi_thread(NULL) &&
327*4882a593Smuzhiyun !atomic_dec_and_test(&cryptodev_sess)) {
328*4882a593Smuzhiyun atomic_inc(&cryptodev_sess);
329*4882a593Smuzhiyun ddebug(2, "Non-multithreaded can only create one session. sess = %d",
330*4882a593Smuzhiyun atomic_read(&cryptodev_sess));
331*4882a593Smuzhiyun ret = -EBUSY;
332*4882a593Smuzhiyun goto session_error;
333*4882a593Smuzhiyun }
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun /* put the new session to the list */
336*4882a593Smuzhiyun get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
337*4882a593Smuzhiyun mutex_init(&ses_new->sem);
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun mutex_lock(&fcr->sem);
340*4882a593Smuzhiyun restart:
341*4882a593Smuzhiyun list_for_each_entry(ses_ptr, &fcr->list, entry) {
342*4882a593Smuzhiyun /* Check for duplicate SID */
343*4882a593Smuzhiyun if (unlikely(ses_new->sid == ses_ptr->sid)) {
344*4882a593Smuzhiyun get_random_bytes(&ses_new->sid, sizeof(ses_new->sid));
345*4882a593Smuzhiyun /* Unless we have a broken RNG this
346*4882a593Smuzhiyun shouldn't loop forever... ;-) */
347*4882a593Smuzhiyun goto restart;
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun
351*4882a593Smuzhiyun list_add(&ses_new->entry, &fcr->list);
352*4882a593Smuzhiyun mutex_unlock(&fcr->sem);
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun /* Fill in some values for the user. */
355*4882a593Smuzhiyun sop->ses = ses_new->sid;
356*4882a593Smuzhiyun return 0;
357*4882a593Smuzhiyun
358*4882a593Smuzhiyun /* We count on ses_new to be initialized with zeroes
359*4882a593Smuzhiyun * Since hdata and cdata are embedded within ses_new, it follows that
360*4882a593Smuzhiyun * hdata->init and cdata->init are either zero or one as they have been
361*4882a593Smuzhiyun * initialized or not */
362*4882a593Smuzhiyun session_error:
363*4882a593Smuzhiyun cryptodev_hash_deinit(&ses_new->hdata);
364*4882a593Smuzhiyun cryptodev_cipher_deinit(&ses_new->cdata);
365*4882a593Smuzhiyun kfree(ses_new->sg);
366*4882a593Smuzhiyun kfree(ses_new->pages);
367*4882a593Smuzhiyun kfree(ses_new);
368*4882a593Smuzhiyun return ret;
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun
/* Everything that needs to be done when removing a session.
 *
 * Caller must have already unlinked ses_ptr from its fcrypt list.  The
 * session mutex is taken here to wait out any operation still running on
 * the session before the transforms and zero-copy arrays are torn down
 * and the session itself is freed.
 */
static inline void
crypto_destroy_session(struct csession *ses_ptr)
{
	/* Try the fast path first purely so that we can log when we do
	 * have to block on an in-flight operation. */
	if (!mutex_trylock(&ses_ptr->sem)) {
		ddebug(2, "Waiting for semaphore of sid=0x%08X", ses_ptr->sid);
		mutex_lock(&ses_ptr->sem);
	}
	ddebug(2, "Removed session 0x%08X", ses_ptr->sid);
	cryptodev_cipher_deinit(&ses_ptr->cdata);
	cryptodev_hash_deinit(&ses_ptr->hdata);
	ddebug(2, "freeing space for %d user pages", ses_ptr->array_size);
	kfree(ses_ptr->pages);
	kfree(ses_ptr->sg);
	/* Must unlock before destroying the mutex and freeing its memory. */
	mutex_unlock(&ses_ptr->sem);
	mutex_destroy(&ses_ptr->sem);
	kfree(ses_ptr);

	/* Non-multithreaded can only create one session: return the single
	 * session slot taken in crypto_create_session(). */
	if (!rk_cryptodev_multi_thread(NULL)) {
		atomic_inc(&cryptodev_sess);
		ddebug(2, "Release cryptodev_sess = %d", atomic_read(&cryptodev_sess));
	}
}
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun /* Look up a session by ID and remove. */
397*4882a593Smuzhiyun static int
crypto_finish_session(struct fcrypt * fcr,uint32_t sid)398*4882a593Smuzhiyun crypto_finish_session(struct fcrypt *fcr, uint32_t sid)
399*4882a593Smuzhiyun {
400*4882a593Smuzhiyun struct csession *tmp, *ses_ptr;
401*4882a593Smuzhiyun struct list_head *head;
402*4882a593Smuzhiyun int ret = 0;
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun mutex_lock(&fcr->sem);
405*4882a593Smuzhiyun head = &fcr->list;
406*4882a593Smuzhiyun list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
407*4882a593Smuzhiyun if (ses_ptr->sid == sid) {
408*4882a593Smuzhiyun list_del(&ses_ptr->entry);
409*4882a593Smuzhiyun crypto_destroy_session(ses_ptr);
410*4882a593Smuzhiyun break;
411*4882a593Smuzhiyun }
412*4882a593Smuzhiyun }
413*4882a593Smuzhiyun
414*4882a593Smuzhiyun if (unlikely(!ses_ptr)) {
415*4882a593Smuzhiyun derr(1, "Session with sid=0x%08X not found!", sid);
416*4882a593Smuzhiyun ret = -ENOENT;
417*4882a593Smuzhiyun }
418*4882a593Smuzhiyun mutex_unlock(&fcr->sem);
419*4882a593Smuzhiyun
420*4882a593Smuzhiyun return ret;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun /* Remove all sessions when closing the file */
424*4882a593Smuzhiyun static int
crypto_finish_all_sessions(struct fcrypt * fcr)425*4882a593Smuzhiyun crypto_finish_all_sessions(struct fcrypt *fcr)
426*4882a593Smuzhiyun {
427*4882a593Smuzhiyun struct csession *tmp, *ses_ptr;
428*4882a593Smuzhiyun struct list_head *head;
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun mutex_lock(&fcr->sem);
431*4882a593Smuzhiyun
432*4882a593Smuzhiyun head = &fcr->list;
433*4882a593Smuzhiyun list_for_each_entry_safe(ses_ptr, tmp, head, entry) {
434*4882a593Smuzhiyun list_del(&ses_ptr->entry);
435*4882a593Smuzhiyun crypto_destroy_session(ses_ptr);
436*4882a593Smuzhiyun }
437*4882a593Smuzhiyun mutex_unlock(&fcr->sem);
438*4882a593Smuzhiyun
439*4882a593Smuzhiyun return 0;
440*4882a593Smuzhiyun }
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun /* Look up session by session ID. The returned session is locked. */
443*4882a593Smuzhiyun struct csession *
crypto_get_session_by_sid(struct fcrypt * fcr,uint32_t sid)444*4882a593Smuzhiyun crypto_get_session_by_sid(struct fcrypt *fcr, uint32_t sid)
445*4882a593Smuzhiyun {
446*4882a593Smuzhiyun struct csession *ses_ptr, *retval = NULL;
447*4882a593Smuzhiyun
448*4882a593Smuzhiyun if (unlikely(fcr == NULL))
449*4882a593Smuzhiyun return NULL;
450*4882a593Smuzhiyun
451*4882a593Smuzhiyun mutex_lock(&fcr->sem);
452*4882a593Smuzhiyun list_for_each_entry(ses_ptr, &fcr->list, entry) {
453*4882a593Smuzhiyun if (ses_ptr->sid == sid) {
454*4882a593Smuzhiyun mutex_lock(&ses_ptr->sem);
455*4882a593Smuzhiyun retval = ses_ptr;
456*4882a593Smuzhiyun break;
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun }
459*4882a593Smuzhiyun mutex_unlock(&fcr->sem);
460*4882a593Smuzhiyun
461*4882a593Smuzhiyun return retval;
462*4882a593Smuzhiyun }
463*4882a593Smuzhiyun
mutex_lock_double(struct mutex * a,struct mutex * b)464*4882a593Smuzhiyun static void mutex_lock_double(struct mutex *a, struct mutex *b)
465*4882a593Smuzhiyun {
466*4882a593Smuzhiyun if (b < a)
467*4882a593Smuzhiyun swap(a, b);
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun mutex_lock(a);
470*4882a593Smuzhiyun mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
471*4882a593Smuzhiyun }
472*4882a593Smuzhiyun
473*4882a593Smuzhiyun int
crypto_get_sessions_by_sid(struct fcrypt * fcr,uint32_t sid_1,struct csession ** ses_ptr_1,uint32_t sid_2,struct csession ** ses_ptr_2)474*4882a593Smuzhiyun crypto_get_sessions_by_sid(struct fcrypt *fcr,
475*4882a593Smuzhiyun uint32_t sid_1, struct csession **ses_ptr_1,
476*4882a593Smuzhiyun uint32_t sid_2, struct csession **ses_ptr_2)
477*4882a593Smuzhiyun {
478*4882a593Smuzhiyun struct csession *ses_ptr;
479*4882a593Smuzhiyun int retval;
480*4882a593Smuzhiyun
481*4882a593Smuzhiyun if (unlikely(fcr == NULL)) {
482*4882a593Smuzhiyun retval = -ENOENT;
483*4882a593Smuzhiyun goto out;
484*4882a593Smuzhiyun }
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun if (sid_1 == sid_2) {
487*4882a593Smuzhiyun retval = -EDEADLK;
488*4882a593Smuzhiyun goto out;
489*4882a593Smuzhiyun }
490*4882a593Smuzhiyun
491*4882a593Smuzhiyun mutex_lock(&fcr->sem);
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun list_for_each_entry(ses_ptr, &fcr->list, entry) {
494*4882a593Smuzhiyun if (ses_ptr->sid == sid_1)
495*4882a593Smuzhiyun *ses_ptr_1 = ses_ptr;
496*4882a593Smuzhiyun else if (ses_ptr->sid == sid_2)
497*4882a593Smuzhiyun *ses_ptr_2 = ses_ptr;
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun
500*4882a593Smuzhiyun if (*ses_ptr_1 && *ses_ptr_2) {
501*4882a593Smuzhiyun mutex_lock_double(&(*ses_ptr_1)->sem, &(*ses_ptr_2)->sem);
502*4882a593Smuzhiyun retval = 0;
503*4882a593Smuzhiyun } else {
504*4882a593Smuzhiyun retval = -ENOENT;
505*4882a593Smuzhiyun }
506*4882a593Smuzhiyun
507*4882a593Smuzhiyun mutex_unlock(&fcr->sem);
508*4882a593Smuzhiyun
509*4882a593Smuzhiyun out:
510*4882a593Smuzhiyun if (retval) {
511*4882a593Smuzhiyun *ses_ptr_1 = NULL;
512*4882a593Smuzhiyun *ses_ptr_2 = NULL;
513*4882a593Smuzhiyun }
514*4882a593Smuzhiyun return retval;
515*4882a593Smuzhiyun }
516*4882a593Smuzhiyun
#ifdef CIOCCPHASH
/* Copy the hash state from one session to another.
 *
 * Looks up and locks both sessions, duplicates the source's partial hash
 * state into the destination, then releases both.
 *
 * Returns 0 on success or a negative errno.
 */
static int
crypto_copy_hash_state(struct fcrypt *fcr, uint32_t dst_sid, uint32_t src_sid)
{
	struct csession *src_ses, *dst_ses;
	int ret;

	ret = crypto_get_sessions_by_sid(fcr, src_sid, &src_ses,
					 dst_sid, &dst_ses);
	if (unlikely(ret)) {
		/* BUG FIX: the second specifier used to read "sid=%0x08X"
		 * (a mangled format that printed garbage); "0x%08X" is
		 * what was intended.  Also fixes the "sesssions" typo. */
		derr(1, "Failed to get sessions with sid=0x%08X sid=0x%08X!",
		     src_sid, dst_sid);
		return ret;
	}

	ret = cryptodev_hash_copy(&dst_ses->hdata, &src_ses->hdata);
	crypto_put_session(src_ses);
	crypto_put_session(dst_ses);
	return ret;
}
#endif /* CIOCCPHASH */
539*4882a593Smuzhiyun
cryptask_routine(struct work_struct * work)540*4882a593Smuzhiyun static void cryptask_routine(struct work_struct *work)
541*4882a593Smuzhiyun {
542*4882a593Smuzhiyun struct crypt_priv *pcr = container_of(work, struct crypt_priv, cryptask);
543*4882a593Smuzhiyun struct todo_list_item *item;
544*4882a593Smuzhiyun LIST_HEAD(tmp);
545*4882a593Smuzhiyun
546*4882a593Smuzhiyun /* fetch all pending jobs into the temporary list */
547*4882a593Smuzhiyun mutex_lock(&pcr->todo.lock);
548*4882a593Smuzhiyun list_cut_position(&tmp, &pcr->todo.list, pcr->todo.list.prev);
549*4882a593Smuzhiyun mutex_unlock(&pcr->todo.lock);
550*4882a593Smuzhiyun
551*4882a593Smuzhiyun /* handle each job locklessly */
552*4882a593Smuzhiyun list_for_each_entry(item, &tmp, __hook) {
553*4882a593Smuzhiyun item->result = crypto_run(&pcr->fcrypt, &item->kcop);
554*4882a593Smuzhiyun if (unlikely(item->result))
555*4882a593Smuzhiyun derr(0, "crypto_run() failed: %d", item->result);
556*4882a593Smuzhiyun }
557*4882a593Smuzhiyun
558*4882a593Smuzhiyun /* push all handled jobs to the done list at once */
559*4882a593Smuzhiyun mutex_lock(&pcr->done.lock);
560*4882a593Smuzhiyun list_splice_tail(&tmp, &pcr->done.list);
561*4882a593Smuzhiyun mutex_unlock(&pcr->done.lock);
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun /* wake for POLLIN */
564*4882a593Smuzhiyun wake_up_interruptible(&pcr->user_waiter);
565*4882a593Smuzhiyun }
566*4882a593Smuzhiyun
/* ====== /dev/crypto ====== */
/* Single-open slot for non-multithreaded hardware: starts at 1, claimed
 * in cryptodev_open(), given back in cryptodev_release(). */
static atomic_t cryptodev_node = ATOMIC_INIT(1);
569*4882a593Smuzhiyun
570*4882a593Smuzhiyun static int
cryptodev_open(struct inode * inode,struct file * filp)571*4882a593Smuzhiyun cryptodev_open(struct inode *inode, struct file *filp)
572*4882a593Smuzhiyun {
573*4882a593Smuzhiyun struct todo_list_item *tmp, *tmp_next;
574*4882a593Smuzhiyun struct crypt_priv *pcr;
575*4882a593Smuzhiyun int i;
576*4882a593Smuzhiyun
577*4882a593Smuzhiyun /* Non-multithreaded can only be opened once */
578*4882a593Smuzhiyun if (!rk_cryptodev_multi_thread(NULL) &&
579*4882a593Smuzhiyun !atomic_dec_and_test(&cryptodev_node)) {
580*4882a593Smuzhiyun atomic_inc(&cryptodev_node);
581*4882a593Smuzhiyun ddebug(2, "Non-multithreaded can only be opened once. node = %d",
582*4882a593Smuzhiyun atomic_read(&cryptodev_node));
583*4882a593Smuzhiyun return -EBUSY;
584*4882a593Smuzhiyun }
585*4882a593Smuzhiyun
586*4882a593Smuzhiyun /* make sure sess == 1 after open */
587*4882a593Smuzhiyun atomic_set(&cryptodev_sess, 1);
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
590*4882a593Smuzhiyun if (!pcr)
591*4882a593Smuzhiyun return -ENOMEM;
592*4882a593Smuzhiyun filp->private_data = pcr;
593*4882a593Smuzhiyun
594*4882a593Smuzhiyun mutex_init(&pcr->fcrypt.sem);
595*4882a593Smuzhiyun mutex_init(&pcr->free.lock);
596*4882a593Smuzhiyun mutex_init(&pcr->todo.lock);
597*4882a593Smuzhiyun mutex_init(&pcr->done.lock);
598*4882a593Smuzhiyun
599*4882a593Smuzhiyun INIT_LIST_HEAD(&pcr->fcrypt.list);
600*4882a593Smuzhiyun INIT_LIST_HEAD(&pcr->fcrypt.dma_map_list);
601*4882a593Smuzhiyun INIT_LIST_HEAD(&pcr->free.list);
602*4882a593Smuzhiyun INIT_LIST_HEAD(&pcr->todo.list);
603*4882a593Smuzhiyun INIT_LIST_HEAD(&pcr->done.list);
604*4882a593Smuzhiyun
605*4882a593Smuzhiyun INIT_WORK(&pcr->cryptask, cryptask_routine);
606*4882a593Smuzhiyun
607*4882a593Smuzhiyun init_waitqueue_head(&pcr->user_waiter);
608*4882a593Smuzhiyun
609*4882a593Smuzhiyun for (i = 0; i < DEF_COP_RINGSIZE; i++) {
610*4882a593Smuzhiyun tmp = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
611*4882a593Smuzhiyun if (!tmp)
612*4882a593Smuzhiyun goto err_ringalloc;
613*4882a593Smuzhiyun pcr->itemcount++;
614*4882a593Smuzhiyun ddebug(2, "allocated new item at %p", tmp);
615*4882a593Smuzhiyun list_add(&tmp->__hook, &pcr->free.list);
616*4882a593Smuzhiyun }
617*4882a593Smuzhiyun
618*4882a593Smuzhiyun ddebug(2, "Cryptodev handle initialised, %d elements in queue",
619*4882a593Smuzhiyun DEF_COP_RINGSIZE);
620*4882a593Smuzhiyun return 0;
621*4882a593Smuzhiyun
622*4882a593Smuzhiyun /* In case of errors, free any memory allocated so far */
623*4882a593Smuzhiyun err_ringalloc:
624*4882a593Smuzhiyun list_for_each_entry_safe(tmp, tmp_next, &pcr->free.list, __hook) {
625*4882a593Smuzhiyun list_del(&tmp->__hook);
626*4882a593Smuzhiyun kfree(tmp);
627*4882a593Smuzhiyun }
628*4882a593Smuzhiyun mutex_destroy(&pcr->done.lock);
629*4882a593Smuzhiyun mutex_destroy(&pcr->todo.lock);
630*4882a593Smuzhiyun mutex_destroy(&pcr->free.lock);
631*4882a593Smuzhiyun mutex_destroy(&pcr->fcrypt.sem);
632*4882a593Smuzhiyun kfree(pcr);
633*4882a593Smuzhiyun filp->private_data = NULL;
634*4882a593Smuzhiyun return -ENOMEM;
635*4882a593Smuzhiyun }
636*4882a593Smuzhiyun
/* Release handler for /dev/crypto: stop the worker, free every ring
 * item, destroy all sessions of this fd and free the private state.
 * Ordering matters throughout -- see the inline comments. */
static int
cryptodev_release(struct inode *inode, struct file *filp)
{
	struct crypt_priv *pcr = filp->private_data;
	struct todo_list_item *item, *item_safe;
	int items_freed = 0;

	if (!pcr)
		return 0;

	/* Non-multithreaded can only be opened once: give the open slot
	 * taken in cryptodev_open() back. */
	if (!rk_cryptodev_multi_thread(NULL)) {
		atomic_inc(&cryptodev_node);
		ddebug(2, "Release cryptodev_node = %d", atomic_read(&cryptodev_node));
	}

	/* No job may still be running before we start tearing down. */
	cancel_work_sync(&pcr->cryptask);

	/* Worker is stopped and the fd is closing, so the three lists can
	 * be merged without taking their locks. */
	list_splice_tail(&pcr->todo.list, &pcr->free.list);
	list_splice_tail(&pcr->done.list, &pcr->free.list);

	list_for_each_entry_safe(item, item_safe, &pcr->free.list, __hook) {
		ddebug(2, "freeing item at %p", item);
		list_del(&item->__hook);
		kfree(item);
		items_freed++;
	}

	/* Sanity check: every item ever allocated must have been freed. */
	if (items_freed != pcr->itemcount) {
		derr(0, "freed %d items, but %d should exist!",
		     items_freed, pcr->itemcount);
	}

	/* Sessions must go before the fcrypt mutex is destroyed below. */
	crypto_finish_all_sessions(&pcr->fcrypt);

	mutex_destroy(&pcr->done.lock);
	mutex_destroy(&pcr->todo.lock);
	mutex_destroy(&pcr->free.lock);
	mutex_destroy(&pcr->fcrypt.sem);

	kfree(pcr);
	filp->private_data = NULL;

	ddebug(2, "Cryptodev handle deinitialised, %d elements freed",
	       items_freed);
	return 0;
}
684*4882a593Smuzhiyun
685*4882a593Smuzhiyun #ifdef ENABLE_ASYNC
686*4882a593Smuzhiyun /* enqueue a job for asynchronous completion
687*4882a593Smuzhiyun *
688*4882a593Smuzhiyun * returns:
689*4882a593Smuzhiyun * -EBUSY when there are no free queue slots left
690*4882a593Smuzhiyun * (and the number of slots has reached it MAX_COP_RINGSIZE)
691*4882a593Smuzhiyun * -EFAULT when there was a memory allocation error
692*4882a593Smuzhiyun * 0 on success */
crypto_async_run(struct crypt_priv * pcr,struct kernel_crypt_op * kcop)693*4882a593Smuzhiyun static int crypto_async_run(struct crypt_priv *pcr, struct kernel_crypt_op *kcop)
694*4882a593Smuzhiyun {
695*4882a593Smuzhiyun struct todo_list_item *item = NULL;
696*4882a593Smuzhiyun
697*4882a593Smuzhiyun if (unlikely(kcop->cop.flags & COP_FLAG_NO_ZC))
698*4882a593Smuzhiyun return -EINVAL;
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun mutex_lock(&pcr->free.lock);
701*4882a593Smuzhiyun if (likely(!list_empty(&pcr->free.list))) {
702*4882a593Smuzhiyun item = list_first_entry(&pcr->free.list,
703*4882a593Smuzhiyun struct todo_list_item, __hook);
704*4882a593Smuzhiyun list_del(&item->__hook);
705*4882a593Smuzhiyun } else if (pcr->itemcount < MAX_COP_RINGSIZE) {
706*4882a593Smuzhiyun pcr->itemcount++;
707*4882a593Smuzhiyun } else {
708*4882a593Smuzhiyun mutex_unlock(&pcr->free.lock);
709*4882a593Smuzhiyun return -EBUSY;
710*4882a593Smuzhiyun }
711*4882a593Smuzhiyun mutex_unlock(&pcr->free.lock);
712*4882a593Smuzhiyun
713*4882a593Smuzhiyun if (unlikely(!item)) {
714*4882a593Smuzhiyun item = kzalloc(sizeof(struct todo_list_item), GFP_KERNEL);
715*4882a593Smuzhiyun if (unlikely(!item))
716*4882a593Smuzhiyun return -EFAULT;
717*4882a593Smuzhiyun dinfo(1, "increased item count to %d", pcr->itemcount);
718*4882a593Smuzhiyun }
719*4882a593Smuzhiyun
720*4882a593Smuzhiyun memcpy(&item->kcop, kcop, sizeof(struct kernel_crypt_op));
721*4882a593Smuzhiyun
722*4882a593Smuzhiyun mutex_lock(&pcr->todo.lock);
723*4882a593Smuzhiyun list_add_tail(&item->__hook, &pcr->todo.list);
724*4882a593Smuzhiyun mutex_unlock(&pcr->todo.lock);
725*4882a593Smuzhiyun
726*4882a593Smuzhiyun queue_work(cryptodev_wq, &pcr->cryptask);
727*4882a593Smuzhiyun return 0;
728*4882a593Smuzhiyun }
729*4882a593Smuzhiyun
730*4882a593Smuzhiyun /* get the first completed job from the "done" queue
731*4882a593Smuzhiyun *
732*4882a593Smuzhiyun * returns:
733*4882a593Smuzhiyun * -EBUSY if no completed jobs are ready (yet)
734*4882a593Smuzhiyun * the return value of crypto_run() otherwise */
crypto_async_fetch(struct crypt_priv * pcr,struct kernel_crypt_op * kcop)735*4882a593Smuzhiyun static int crypto_async_fetch(struct crypt_priv *pcr,
736*4882a593Smuzhiyun struct kernel_crypt_op *kcop)
737*4882a593Smuzhiyun {
738*4882a593Smuzhiyun struct todo_list_item *item;
739*4882a593Smuzhiyun int retval;
740*4882a593Smuzhiyun
741*4882a593Smuzhiyun mutex_lock(&pcr->done.lock);
742*4882a593Smuzhiyun if (list_empty(&pcr->done.list)) {
743*4882a593Smuzhiyun mutex_unlock(&pcr->done.lock);
744*4882a593Smuzhiyun return -EBUSY;
745*4882a593Smuzhiyun }
746*4882a593Smuzhiyun item = list_first_entry(&pcr->done.list, struct todo_list_item, __hook);
747*4882a593Smuzhiyun list_del(&item->__hook);
748*4882a593Smuzhiyun mutex_unlock(&pcr->done.lock);
749*4882a593Smuzhiyun
750*4882a593Smuzhiyun memcpy(kcop, &item->kcop, sizeof(struct kernel_crypt_op));
751*4882a593Smuzhiyun retval = item->result;
752*4882a593Smuzhiyun
753*4882a593Smuzhiyun mutex_lock(&pcr->free.lock);
754*4882a593Smuzhiyun list_add_tail(&item->__hook, &pcr->free.list);
755*4882a593Smuzhiyun mutex_unlock(&pcr->free.lock);
756*4882a593Smuzhiyun
757*4882a593Smuzhiyun /* wake for POLLOUT */
758*4882a593Smuzhiyun wake_up_interruptible(&pcr->user_waiter);
759*4882a593Smuzhiyun
760*4882a593Smuzhiyun return retval;
761*4882a593Smuzhiyun }
762*4882a593Smuzhiyun #endif
763*4882a593Smuzhiyun
764*4882a593Smuzhiyun /* this function has to be called from process context */
fill_kcop_from_cop(struct kernel_crypt_op * kcop,struct fcrypt * fcr)765*4882a593Smuzhiyun static int fill_kcop_from_cop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
766*4882a593Smuzhiyun {
767*4882a593Smuzhiyun struct crypt_op *cop = &kcop->cop;
768*4882a593Smuzhiyun struct csession *ses_ptr;
769*4882a593Smuzhiyun int rc;
770*4882a593Smuzhiyun
771*4882a593Smuzhiyun /* this also enters ses_ptr->sem */
772*4882a593Smuzhiyun ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
773*4882a593Smuzhiyun if (unlikely(!ses_ptr)) {
774*4882a593Smuzhiyun derr(1, "invalid session ID=0x%08X", cop->ses);
775*4882a593Smuzhiyun return -EINVAL;
776*4882a593Smuzhiyun }
777*4882a593Smuzhiyun kcop->ivlen = cop->iv ? ses_ptr->cdata.ivsize : 0;
778*4882a593Smuzhiyun kcop->digestsize = 0; /* will be updated during operation */
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun crypto_put_session(ses_ptr);
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun kcop->task = current;
783*4882a593Smuzhiyun kcop->mm = current->mm;
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun if (cop->iv) {
786*4882a593Smuzhiyun rc = copy_from_user(kcop->iv, cop->iv, kcop->ivlen);
787*4882a593Smuzhiyun if (unlikely(rc)) {
788*4882a593Smuzhiyun derr(1, "error copying IV (%d bytes), copy_from_user returned %d for address %p",
789*4882a593Smuzhiyun kcop->ivlen, rc, cop->iv);
790*4882a593Smuzhiyun return -EFAULT;
791*4882a593Smuzhiyun }
792*4882a593Smuzhiyun }
793*4882a593Smuzhiyun
794*4882a593Smuzhiyun return 0;
795*4882a593Smuzhiyun }
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun /* this function has to be called from process context */
fill_cop_from_kcop(struct kernel_crypt_op * kcop,struct fcrypt * fcr)798*4882a593Smuzhiyun static int fill_cop_from_kcop(struct kernel_crypt_op *kcop, struct fcrypt *fcr)
799*4882a593Smuzhiyun {
800*4882a593Smuzhiyun int ret;
801*4882a593Smuzhiyun
802*4882a593Smuzhiyun if (kcop->digestsize) {
803*4882a593Smuzhiyun ret = copy_to_user(kcop->cop.mac,
804*4882a593Smuzhiyun kcop->hash_output, kcop->digestsize);
805*4882a593Smuzhiyun if (unlikely(ret))
806*4882a593Smuzhiyun return -EFAULT;
807*4882a593Smuzhiyun }
808*4882a593Smuzhiyun if (kcop->ivlen && kcop->cop.flags & COP_FLAG_WRITE_IV) {
809*4882a593Smuzhiyun ret = copy_to_user(kcop->cop.iv,
810*4882a593Smuzhiyun kcop->iv, kcop->ivlen);
811*4882a593Smuzhiyun if (unlikely(ret))
812*4882a593Smuzhiyun return -EFAULT;
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun return 0;
815*4882a593Smuzhiyun }
816*4882a593Smuzhiyun
/* Pull a crypt_op descriptor in from userspace and finish setting up
 * the kernel-side wrapper around it. */
static int kcop_from_user(struct kernel_crypt_op *kcop,
		struct fcrypt *fcr, void __user *arg)
{
	if (unlikely(copy_from_user(&kcop->cop, arg, sizeof(kcop->cop)) != 0))
		return -EFAULT;

	return fill_kcop_from_cop(kcop, fcr);
}
825*4882a593Smuzhiyun
/* Write the operation's results back to userspace: first the out-of-band
 * buffers (digest/IV), then the crypt_op structure itself. */
static int kcop_to_user(struct kernel_crypt_op *kcop,
		struct fcrypt *fcr, void __user *arg)
{
	int err = fill_cop_from_kcop(kcop, fcr);

	if (unlikely(err)) {
		derr(1, "Error in fill_cop_from_kcop");
		return err;
	}

	if (likely(copy_to_user(arg, &kcop->cop, sizeof(kcop->cop)) == 0))
		return 0;

	derr(1, "Cannot copy to userspace");
	return -EFAULT;
}
843*4882a593Smuzhiyun
tfm_info_to_alg_info(struct alg_info * dst,struct crypto_tfm * tfm)844*4882a593Smuzhiyun static inline void tfm_info_to_alg_info(struct alg_info *dst, struct crypto_tfm *tfm)
845*4882a593Smuzhiyun {
846*4882a593Smuzhiyun snprintf(dst->cra_name, CRYPTODEV_MAX_ALG_NAME,
847*4882a593Smuzhiyun "%s", crypto_tfm_alg_name(tfm));
848*4882a593Smuzhiyun snprintf(dst->cra_driver_name, CRYPTODEV_MAX_ALG_NAME,
849*4882a593Smuzhiyun "%s", crypto_tfm_alg_driver_name(tfm));
850*4882a593Smuzhiyun }
851*4882a593Smuzhiyun
852*4882a593Smuzhiyun #ifndef CRYPTO_ALG_KERN_DRIVER_ONLY
is_known_accelerated(struct crypto_tfm * tfm)853*4882a593Smuzhiyun static unsigned int is_known_accelerated(struct crypto_tfm *tfm)
854*4882a593Smuzhiyun {
855*4882a593Smuzhiyun const char *name = crypto_tfm_alg_driver_name(tfm);
856*4882a593Smuzhiyun
857*4882a593Smuzhiyun if (name == NULL)
858*4882a593Smuzhiyun return 1; /* assume accelerated */
859*4882a593Smuzhiyun
860*4882a593Smuzhiyun /* look for known crypto engine names */
861*4882a593Smuzhiyun if (strstr(name, "-talitos") ||
862*4882a593Smuzhiyun !strncmp(name, "mv-", 3) ||
863*4882a593Smuzhiyun !strncmp(name, "atmel-", 6) ||
864*4882a593Smuzhiyun strstr(name, "geode") ||
865*4882a593Smuzhiyun strstr(name, "hifn") ||
866*4882a593Smuzhiyun strstr(name, "-ixp4xx") ||
867*4882a593Smuzhiyun strstr(name, "-omap") ||
868*4882a593Smuzhiyun strstr(name, "-picoxcell") ||
869*4882a593Smuzhiyun strstr(name, "-s5p") ||
870*4882a593Smuzhiyun strstr(name, "-ppc4xx") ||
871*4882a593Smuzhiyun strstr(name, "-caam") ||
872*4882a593Smuzhiyun strstr(name, "-n2"))
873*4882a593Smuzhiyun return 1;
874*4882a593Smuzhiyun
875*4882a593Smuzhiyun return 0;
876*4882a593Smuzhiyun }
877*4882a593Smuzhiyun #endif
878*4882a593Smuzhiyun
/* Backend for CIOCGSESSINFO: fill @siop with the algorithm/driver names
 * of the session's cipher and hash transforms, and flag whether either
 * is implemented by a kernel-only (i.e. hardware) driver. */
static int get_session_info(struct fcrypt *fcr, struct session_info_op *siop)
{
	struct csession *ses_ptr;
	struct crypto_tfm *tfm;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, siop->ses);
	if (unlikely(!ses_ptr)) {
		derr(1, "invalid session ID=0x%08X", siop->ses);
		return -EINVAL;
	}

	siop->flags = 0;

	if (ses_ptr->cdata.init) {
		/* pick the underlying tfm of the blkcipher or AEAD handle */
		if (ses_ptr->cdata.aead == 0)
			tfm = cryptodev_crypto_blkcipher_tfm(ses_ptr->cdata.async.s);
		else
			tfm = crypto_aead_tfm(ses_ptr->cdata.async.as);
		tfm_info_to_alg_info(&siop->cipher_info, tfm);
#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
		/* modern kernels report hardware-only drivers directly */
		if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#else
		/* older kernels: fall back to a driver-name heuristic */
		if (is_known_accelerated(tfm))
			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#endif
	}
	if (ses_ptr->hdata.init) {
		tfm = crypto_ahash_tfm(ses_ptr->hdata.async.s);
		tfm_info_to_alg_info(&siop->hash_info, tfm);
#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
		if (tfm->__crt_alg->cra_flags & CRYPTO_ALG_KERN_DRIVER_ONLY)
			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#else
		if (is_known_accelerated(tfm))
			siop->flags |= SIOP_FLAG_KERNEL_DRIVER_ONLY;
#endif
	}

	siop->alignmask = ses_ptr->alignmask;

	crypto_put_session(ses_ptr);
	return 0;
}
924*4882a593Smuzhiyun
925*4882a593Smuzhiyun static long
cryptodev_ioctl(struct file * filp,unsigned int cmd,unsigned long arg_)926*4882a593Smuzhiyun cryptodev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg_)
927*4882a593Smuzhiyun {
928*4882a593Smuzhiyun void __user *arg = (void __user *)arg_;
929*4882a593Smuzhiyun int __user *p = arg;
930*4882a593Smuzhiyun struct session_op sop;
931*4882a593Smuzhiyun struct kernel_crypt_op kcop;
932*4882a593Smuzhiyun struct kernel_crypt_auth_op kcaop;
933*4882a593Smuzhiyun struct crypt_priv *pcr = filp->private_data;
934*4882a593Smuzhiyun struct fcrypt *fcr;
935*4882a593Smuzhiyun struct session_info_op siop;
936*4882a593Smuzhiyun #ifdef CIOCCPHASH
937*4882a593Smuzhiyun struct cphash_op cphop;
938*4882a593Smuzhiyun #endif
939*4882a593Smuzhiyun uint32_t ses;
940*4882a593Smuzhiyun int ret, fd;
941*4882a593Smuzhiyun
942*4882a593Smuzhiyun if (unlikely(!pcr))
943*4882a593Smuzhiyun BUG();
944*4882a593Smuzhiyun
945*4882a593Smuzhiyun fcr = &pcr->fcrypt;
946*4882a593Smuzhiyun
947*4882a593Smuzhiyun switch (cmd) {
948*4882a593Smuzhiyun case CIOCASYMFEAT:
949*4882a593Smuzhiyun return put_user(0, p);
950*4882a593Smuzhiyun case CRIOGET:
951*4882a593Smuzhiyun fd = get_unused_fd_flags(0);
952*4882a593Smuzhiyun if (unlikely(fd < 0))
953*4882a593Smuzhiyun return fd;
954*4882a593Smuzhiyun
955*4882a593Smuzhiyun ret = put_user(fd, p);
956*4882a593Smuzhiyun if (unlikely(ret)) {
957*4882a593Smuzhiyun put_unused_fd(fd);
958*4882a593Smuzhiyun return ret;
959*4882a593Smuzhiyun }
960*4882a593Smuzhiyun
961*4882a593Smuzhiyun get_file(filp);
962*4882a593Smuzhiyun fd_install(fd, filp);
963*4882a593Smuzhiyun
964*4882a593Smuzhiyun return ret;
965*4882a593Smuzhiyun case CIOCGSESSION:
966*4882a593Smuzhiyun if (unlikely(copy_from_user(&sop, arg, sizeof(sop))))
967*4882a593Smuzhiyun return -EFAULT;
968*4882a593Smuzhiyun
969*4882a593Smuzhiyun ret = crypto_create_session(fcr, &sop);
970*4882a593Smuzhiyun if (unlikely(ret))
971*4882a593Smuzhiyun return ret;
972*4882a593Smuzhiyun ret = copy_to_user(arg, &sop, sizeof(sop));
973*4882a593Smuzhiyun if (unlikely(ret)) {
974*4882a593Smuzhiyun crypto_finish_session(fcr, sop.ses);
975*4882a593Smuzhiyun return -EFAULT;
976*4882a593Smuzhiyun }
977*4882a593Smuzhiyun return ret;
978*4882a593Smuzhiyun case CIOCFSESSION:
979*4882a593Smuzhiyun ret = get_user(ses, (uint32_t __user *)arg);
980*4882a593Smuzhiyun if (unlikely(ret))
981*4882a593Smuzhiyun return ret;
982*4882a593Smuzhiyun ret = crypto_finish_session(fcr, ses);
983*4882a593Smuzhiyun return ret;
984*4882a593Smuzhiyun case CIOCGSESSINFO:
985*4882a593Smuzhiyun if (unlikely(copy_from_user(&siop, arg, sizeof(siop))))
986*4882a593Smuzhiyun return -EFAULT;
987*4882a593Smuzhiyun
988*4882a593Smuzhiyun ret = get_session_info(fcr, &siop);
989*4882a593Smuzhiyun if (unlikely(ret))
990*4882a593Smuzhiyun return ret;
991*4882a593Smuzhiyun return copy_to_user(arg, &siop, sizeof(siop));
992*4882a593Smuzhiyun #ifdef CIOCCPHASH
993*4882a593Smuzhiyun case CIOCCPHASH:
994*4882a593Smuzhiyun if (unlikely(copy_from_user(&cphop, arg, sizeof(cphop))))
995*4882a593Smuzhiyun return -EFAULT;
996*4882a593Smuzhiyun return crypto_copy_hash_state(fcr, cphop.dst_ses, cphop.src_ses);
997*4882a593Smuzhiyun #endif /* CIOCPHASH */
998*4882a593Smuzhiyun case CIOCCRYPT:
999*4882a593Smuzhiyun if (unlikely(ret = kcop_from_user(&kcop, fcr, arg))) {
1000*4882a593Smuzhiyun dwarning(1, "Error copying from user");
1001*4882a593Smuzhiyun return ret;
1002*4882a593Smuzhiyun }
1003*4882a593Smuzhiyun
1004*4882a593Smuzhiyun ret = crypto_run(fcr, &kcop);
1005*4882a593Smuzhiyun if (unlikely(ret)) {
1006*4882a593Smuzhiyun dwarning(1, "Error in crypto_run");
1007*4882a593Smuzhiyun return ret;
1008*4882a593Smuzhiyun }
1009*4882a593Smuzhiyun
1010*4882a593Smuzhiyun return kcop_to_user(&kcop, fcr, arg);
1011*4882a593Smuzhiyun case CIOCAUTHCRYPT:
1012*4882a593Smuzhiyun if (unlikely(ret = cryptodev_kcaop_from_user(&kcaop, fcr, arg))) {
1013*4882a593Smuzhiyun dwarning(1, "Error copying from user");
1014*4882a593Smuzhiyun return ret;
1015*4882a593Smuzhiyun }
1016*4882a593Smuzhiyun
1017*4882a593Smuzhiyun ret = crypto_auth_run(fcr, &kcaop);
1018*4882a593Smuzhiyun if (unlikely(ret)) {
1019*4882a593Smuzhiyun dwarning(1, "Error in crypto_auth_run");
1020*4882a593Smuzhiyun return ret;
1021*4882a593Smuzhiyun }
1022*4882a593Smuzhiyun return cryptodev_kcaop_to_user(&kcaop, fcr, arg);
1023*4882a593Smuzhiyun #ifdef ENABLE_ASYNC
1024*4882a593Smuzhiyun case CIOCASYNCCRYPT:
1025*4882a593Smuzhiyun if (unlikely(ret = kcop_from_user(&kcop, fcr, arg)))
1026*4882a593Smuzhiyun return ret;
1027*4882a593Smuzhiyun
1028*4882a593Smuzhiyun return crypto_async_run(pcr, &kcop);
1029*4882a593Smuzhiyun case CIOCASYNCFETCH:
1030*4882a593Smuzhiyun ret = crypto_async_fetch(pcr, &kcop);
1031*4882a593Smuzhiyun if (unlikely(ret))
1032*4882a593Smuzhiyun return ret;
1033*4882a593Smuzhiyun
1034*4882a593Smuzhiyun return kcop_to_user(&kcop, fcr, arg);
1035*4882a593Smuzhiyun #endif
1036*4882a593Smuzhiyun default:
1037*4882a593Smuzhiyun return rk_cryptodev_ioctl(fcr, cmd, arg_);
1038*4882a593Smuzhiyun }
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun
1041*4882a593Smuzhiyun /* compatibility code for 32bit userlands */
1042*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun static inline void
compat_to_session_op(struct compat_session_op * compat,struct session_op * sop)1045*4882a593Smuzhiyun compat_to_session_op(struct compat_session_op *compat, struct session_op *sop)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun sop->cipher = compat->cipher;
1048*4882a593Smuzhiyun sop->mac = compat->mac;
1049*4882a593Smuzhiyun sop->keylen = compat->keylen;
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun sop->key = compat_ptr(compat->key);
1052*4882a593Smuzhiyun sop->mackeylen = compat->mackeylen;
1053*4882a593Smuzhiyun sop->mackey = compat_ptr(compat->mackey);
1054*4882a593Smuzhiyun sop->ses = compat->ses;
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyun static inline void
session_op_to_compat(struct session_op * sop,struct compat_session_op * compat)1058*4882a593Smuzhiyun session_op_to_compat(struct session_op *sop, struct compat_session_op *compat)
1059*4882a593Smuzhiyun {
1060*4882a593Smuzhiyun compat->cipher = sop->cipher;
1061*4882a593Smuzhiyun compat->mac = sop->mac;
1062*4882a593Smuzhiyun compat->keylen = sop->keylen;
1063*4882a593Smuzhiyun
1064*4882a593Smuzhiyun compat->key = ptr_to_compat(sop->key);
1065*4882a593Smuzhiyun compat->mackeylen = sop->mackeylen;
1066*4882a593Smuzhiyun compat->mackey = ptr_to_compat(sop->mackey);
1067*4882a593Smuzhiyun compat->ses = sop->ses;
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun static inline void
compat_to_crypt_op(struct compat_crypt_op * compat,struct crypt_op * cop)1071*4882a593Smuzhiyun compat_to_crypt_op(struct compat_crypt_op *compat, struct crypt_op *cop)
1072*4882a593Smuzhiyun {
1073*4882a593Smuzhiyun cop->ses = compat->ses;
1074*4882a593Smuzhiyun cop->op = compat->op;
1075*4882a593Smuzhiyun cop->flags = compat->flags;
1076*4882a593Smuzhiyun cop->len = compat->len;
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun cop->src = compat_ptr(compat->src);
1079*4882a593Smuzhiyun cop->dst = compat_ptr(compat->dst);
1080*4882a593Smuzhiyun cop->mac = compat_ptr(compat->mac);
1081*4882a593Smuzhiyun cop->iv = compat_ptr(compat->iv);
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun
1084*4882a593Smuzhiyun static inline void
crypt_op_to_compat(struct crypt_op * cop,struct compat_crypt_op * compat)1085*4882a593Smuzhiyun crypt_op_to_compat(struct crypt_op *cop, struct compat_crypt_op *compat)
1086*4882a593Smuzhiyun {
1087*4882a593Smuzhiyun compat->ses = cop->ses;
1088*4882a593Smuzhiyun compat->op = cop->op;
1089*4882a593Smuzhiyun compat->flags = cop->flags;
1090*4882a593Smuzhiyun compat->len = cop->len;
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun compat->src = ptr_to_compat(cop->src);
1093*4882a593Smuzhiyun compat->dst = ptr_to_compat(cop->dst);
1094*4882a593Smuzhiyun compat->mac = ptr_to_compat(cop->mac);
1095*4882a593Smuzhiyun compat->iv = ptr_to_compat(cop->iv);
1096*4882a593Smuzhiyun }
1097*4882a593Smuzhiyun
/* 32-bit variant of kcop_from_user(): fetch the compat descriptor,
 * convert it to the native layout, then fill the kernel-side fields. */
static int compat_kcop_from_user(struct kernel_crypt_op *kcop,
		struct fcrypt *fcr, void __user *arg)
{
	struct compat_crypt_op cop32;

	if (unlikely(copy_from_user(&cop32, arg, sizeof(cop32))))
		return -EFAULT;
	compat_to_crypt_op(&cop32, &kcop->cop);

	return fill_kcop_from_cop(kcop, fcr);
}
1109*4882a593Smuzhiyun
/* 32-bit variant of kcop_to_user(): write results to the user buffers,
 * then hand back the crypt_op in compat layout. */
static int compat_kcop_to_user(struct kernel_crypt_op *kcop,
		struct fcrypt *fcr, void __user *arg)
{
	struct compat_crypt_op cop32;
	int err;

	err = fill_cop_from_kcop(kcop, fcr);
	if (unlikely(err)) {
		dwarning(1, "Error in fill_cop_from_kcop");
		return err;
	}

	crypt_op_to_compat(&kcop->cop, &cop32);
	if (unlikely(copy_to_user(arg, &cop32, sizeof(cop32)))) {
		dwarning(1, "Error copying to user");
		return -EFAULT;
	}
	return 0;
}
1129*4882a593Smuzhiyun
/* ioctl entry point for 32-bit userlands on a 64-bit kernel.
 *
 * Commands whose argument layout is identical in both ABIs are simply
 * forwarded to cryptodev_ioctl(); the COMPAT_* commands carry 32-bit
 * structures that are converted to/from the native layout here. */
static long
cryptodev_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg_)
{
	void __user *arg = (void __user *)arg_;
	struct crypt_priv *pcr = file->private_data;
	struct fcrypt *fcr;
	struct session_op sop;
	struct compat_session_op compat_sop;
	struct kernel_crypt_op kcop;
	struct kernel_crypt_auth_op kcaop;
	int ret;

	if (unlikely(!pcr))
		BUG();

	fcr = &pcr->fcrypt;

	switch (cmd) {
	/* these take no pointer-bearing structs: same in both ABIs */
	case CIOCASYMFEAT:
	case CRIOGET:
	case CIOCFSESSION:
	case CIOCGSESSINFO:
		return cryptodev_ioctl(file, cmd, arg_);

	case COMPAT_CIOCGSESSION:
		if (unlikely(copy_from_user(&compat_sop, arg,
				sizeof(compat_sop))))
			return -EFAULT;
		compat_to_session_op(&compat_sop, &sop);

		ret = crypto_create_session(fcr, &sop);
		if (unlikely(ret))
			return ret;

		session_op_to_compat(&sop, &compat_sop);
		ret = copy_to_user(arg, &compat_sop, sizeof(compat_sop));
		if (unlikely(ret)) {
			/* userspace never saw the session id: tear it down */
			crypto_finish_session(fcr, sop.ses);
			return -EFAULT;
		}
		return ret;

	case COMPAT_CIOCCRYPT:
		ret = compat_kcop_from_user(&kcop, fcr, arg);
		if (unlikely(ret))
			return ret;

		ret = crypto_run(fcr, &kcop);
		if (unlikely(ret))
			return ret;

		return compat_kcop_to_user(&kcop, fcr, arg);

	case COMPAT_CIOCAUTHCRYPT:
		ret = compat_kcaop_from_user(&kcaop, fcr, arg);
		if (unlikely(ret)) {
			dwarning(1, "Error copying from user");
			return ret;
		}

		ret = crypto_auth_run(fcr, &kcaop);
		if (unlikely(ret)) {
			dwarning(1, "Error in crypto_auth_run");
			return ret;
		}
		return compat_kcaop_to_user(&kcaop, fcr, arg);
#ifdef ENABLE_ASYNC
	case COMPAT_CIOCASYNCCRYPT:
		if (unlikely(ret = compat_kcop_from_user(&kcop, fcr, arg)))
			return ret;

		return crypto_async_run(pcr, &kcop);
	case COMPAT_CIOCASYNCFETCH:
		ret = crypto_async_fetch(pcr, &kcop);
		if (unlikely(ret))
			return ret;

		return compat_kcop_to_user(&kcop, fcr, arg);
#endif
	default:
		/* not one of ours: try the vendor compat extensions */
		return rk_compat_cryptodev_ioctl(fcr, cmd, arg_);
	}
}
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun #endif /* CONFIG_COMPAT */
1215*4882a593Smuzhiyun
/* poll()/select() support: POLLIN when completed async jobs can be
 * fetched, POLLOUT when a new async job could be queued. */
static unsigned int cryptodev_poll(struct file *file, poll_table *wait)
{
	struct crypt_priv *pcr = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &pcr->user_waiter, wait);

	/* completed jobs ready to be fetched */
	if (!list_empty_careful(&pcr->done.list))
		mask |= POLLIN | POLLRDNORM;
	/* a free slot exists, or the ring can still grow */
	if (!list_empty_careful(&pcr->free.list) ||
	    pcr->itemcount < MAX_COP_RINGSIZE)
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
1230*4882a593Smuzhiyun
/* File operations backing /dev/crypto. */
static const struct file_operations cryptodev_fops = {
	.owner = THIS_MODULE,
	.open = cryptodev_open,
	.release = cryptodev_release,
	.unlocked_ioctl = cryptodev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cryptodev_compat_ioctl,
#endif /* CONFIG_COMPAT */
	.poll = cryptodev_poll,
};
1241*4882a593Smuzhiyun
/* Misc-device descriptor for /dev/crypto.
 * NOTE(review): mode 0666 makes the node world read/writable by design
 * (any user may open crypto sessions) — confirm this matches policy. */
static struct miscdevice cryptodev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "crypto",
	.fops = &cryptodev_fops,
	.mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH,
};
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun static int __init
cryptodev_register(void)1250*4882a593Smuzhiyun cryptodev_register(void)
1251*4882a593Smuzhiyun {
1252*4882a593Smuzhiyun int rc;
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun rc = misc_register(&cryptodev);
1255*4882a593Smuzhiyun if (unlikely(rc)) {
1256*4882a593Smuzhiyun pr_err(PFX "registration of /dev/crypto failed\n");
1257*4882a593Smuzhiyun return rc;
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun return 0;
1261*4882a593Smuzhiyun }
1262*4882a593Smuzhiyun
/* Remove the /dev/crypto device node. */
static void __exit
cryptodev_deregister(void)
{
	misc_deregister(&cryptodev);
}
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun /* ====== Module init/exit ====== */
/* ====== Module init/exit ====== */
/* sysctl leaf: ioctl/cryptodev_verbosity, an int tunable (rw for root). */
static struct ctl_table verbosity_ctl_dir[] = {
	{
		.procname       = "cryptodev_verbosity",
		.data           = &cryptodev_verbosity,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec,
	},
	{},	/* sentinel */
};
1280*4882a593Smuzhiyun
/* sysctl directory ("ioctl") that holds the verbosity knob. */
static struct ctl_table verbosity_ctl_root[] = {
	{
		.procname       = "ioctl",
		.mode           = 0555,
		.child          = verbosity_ctl_dir,
	},
	{},	/* sentinel */
};
/* handle returned by register_sysctl_table(); NULL if registration failed */
static struct ctl_table_header *verbosity_sysctl_header;
init_cryptodev(void)1290*4882a593Smuzhiyun static int __init init_cryptodev(void)
1291*4882a593Smuzhiyun {
1292*4882a593Smuzhiyun int rc;
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun cryptodev_wq = create_workqueue("cryptodev_queue");
1295*4882a593Smuzhiyun if (unlikely(!cryptodev_wq)) {
1296*4882a593Smuzhiyun pr_err(PFX "failed to allocate the cryptodev workqueue\n");
1297*4882a593Smuzhiyun return -EFAULT;
1298*4882a593Smuzhiyun }
1299*4882a593Smuzhiyun
1300*4882a593Smuzhiyun rc = cryptodev_register();
1301*4882a593Smuzhiyun if (unlikely(rc)) {
1302*4882a593Smuzhiyun destroy_workqueue(cryptodev_wq);
1303*4882a593Smuzhiyun return rc;
1304*4882a593Smuzhiyun }
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun verbosity_sysctl_header = register_sysctl_table(verbosity_ctl_root);
1307*4882a593Smuzhiyun
1308*4882a593Smuzhiyun pr_info(PFX "driver %s loaded.\n", VERSION);
1309*4882a593Smuzhiyun
1310*4882a593Smuzhiyun return 0;
1311*4882a593Smuzhiyun }
1312*4882a593Smuzhiyun
/* Module unload: drain and destroy the worker queue, drop the sysctl
 * entry and remove the device node. */
static void __exit exit_cryptodev(void)
{
	/* NOTE(review): destroy_workqueue() already flushes pending work;
	 * the explicit flush is kept for clarity/older-kernel behavior. */
	flush_workqueue(cryptodev_wq);
	destroy_workqueue(cryptodev_wq);

	if (verbosity_sysctl_header)
		unregister_sysctl_table(verbosity_sysctl_header);

	cryptodev_deregister();
	pr_info(PFX "driver unloaded.\n");
}
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun module_init(init_cryptodev);
1326*4882a593Smuzhiyun module_exit(exit_cryptodev);
1327*4882a593Smuzhiyun
1328