/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
 *
 * Author: Nikos Mavrogiannopoulos
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/*
 * This file handles the AEAD part of /dev/crypto.
 */
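/* For orientation, a minimal sketch of how userspace drives this code via
 * the CIOCAUTHCRYPT ioctl (illustrative only; it assumes a session "sess"
 * already set up through CIOCGSESSION, a /dev/crypto fd "cfd", and buffers
 * "data", "aad" and "iv" of the stated lengths):
 *
 *	struct crypt_auth_op cao = {
 *		.ses      = sess.ses,
 *		.op       = COP_ENCRYPT,
 *		.len      = data_len,
 *		.src      = data,
 *		.dst      = data,	// in-place operation
 *		.auth_src = aad,
 *		.auth_len = aad_len,
 *		.tag_len  = 16,
 *		.iv       = iv,
 *		.iv_len   = 12,
 *	};
 *	if (ioctl(cfd, CIOCAUTHCRYPT, &cao) < 0)
 *		perror("CIOCAUTHCRYPT");
 */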

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include "cryptodev.h"
#include "zc.h"
#include "util.h"
#include "cryptlib.h"
#include "version.h"


/* Make caop->dst available as a scatterlist.
 * (caop->src is assumed to be equal to caop->dst)
 */
static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			   struct scatterlist **dst_sg)
{
	int pagecount = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	int rc;

	if (caop->dst == NULL)
		return -EINVAL;

	if (ses->alignmask) {
		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask + 1))
			dwarning(2, "careful - destination address %p is not %d byte aligned",
				 caop->dst, ses->alignmask + 1);
	}

	if (kcaop->dst_len == 0) {
		dwarning(1, "Destination length cannot be zero");
		return -EINVAL;
	}

	pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);

	ses->used_pages = pagecount;
	ses->readonly_pages = 0;

	rc = cryptodev_adjust_sg_array(ses, pagecount);
	if (rc)
		return rc;

	rc = __cryptodev_get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
				     ses->pages, ses->sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		derr(1, "failed to get user pages for data input");
		return -EINVAL;
	}

	(*dst_sg) = ses->sg;

	return 0;
}


#define MAX_SRTP_AUTH_DATA_DIFF 256

/* Make caop->auth_src available as a scatterlist.
 * It also provides a pointer to caop->dst, which is assumed to lie within
 * the caop->auth_src buffer. If it does not (i.e. the offset between them
 * exceeds MAX_SRTP_AUTH_DATA_DIFF) an error is returned.
 */
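/* The expected buffer layout, roughly (illustrative, offsets not to scale):
 *
 *	auth_src                              auth_src + auth_len
 *	|<----------------- auth_len ----------------->|
 *	[ header (authenticated only) | payload ...    ]
 *	              ^ src == dst (encrypted in place)
 *	|<-- diff -->|   with 0 <= diff <= MAX_SRTP_AUTH_DATA_DIFF
 */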
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			    struct scatterlist **auth_sg, struct scatterlist **dst_sg)
{
	int pagecount, diff;
	int auth_pagecount = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	int rc;

	if (caop->dst == NULL && caop->auth_src == NULL) {
		derr(1, "dst and auth_src cannot both be NULL");
		return -EINVAL;
	}

	if (ses->alignmask) {
		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask + 1))
			dwarning(2, "careful - destination address %p is not %d byte aligned",
				 caop->dst, ses->alignmask + 1);
		if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask + 1))
			dwarning(2, "careful - source address %p is not %d byte aligned",
				 caop->auth_src, ses->alignmask + 1);
	}

	if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
		dwarning(1, "Destination and auth lengths cannot be zero");
		return -EINVAL;
	}

	/* Note that in SRTP the auth data overlaps with the data to be
	 * encrypted (dst).
	 */

	auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
	diff = (int)(caop->src - caop->auth_src);
	if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
		dwarning(1, "auth_src must overlap with src (diff: %d).", diff);
		return -EINVAL;
	}

	pagecount = auth_pagecount;

	/* double the pages to have pages for dst (= auth_src) */
	rc = cryptodev_adjust_sg_array(ses, pagecount * 2);
	if (rc) {
		derr(1, "cannot adjust sg array");
		return rc;
	}

	rc = __cryptodev_get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
				     ses->pages, ses->sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		derr(1, "failed to get user pages for data input");
		return -EINVAL;
	}

	ses->used_pages = pagecount;
	ses->readonly_pages = 0;

	(*auth_sg) = ses->sg;

	(*dst_sg) = ses->sg + auth_pagecount;
	sg_init_table(*dst_sg, auth_pagecount);
	cryptodev_sg_copy(ses->sg, (*dst_sg), caop->auth_len);
	(*dst_sg) = cryptodev_sg_advance(*dst_sg, diff);
	if (*dst_sg == NULL) {
		cryptodev_release_user_pages(ses);
		derr(1, "failed to get enough pages for auth data");
		return -EINVAL;
	}

	return 0;
}

/*
 * Return the tag (digest) length for authenticated encryption.
 * If the cipher and digest are separate, hdata.init is set - just return
 * the digest length. Otherwise return the tag length of the AEAD cipher.
 */
static int cryptodev_get_tag_len(struct csession *ses_ptr)
{
	if (ses_ptr->hdata.init)
		return ses_ptr->hdata.digestsize;
	else
		return cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
}

/*
 * Calculate the destination buffer length for authenticated encryption. The
 * expectation is that user-space code allocates exactly this much space for
 * the destination buffer before calling cryptodev. The result is
 * cipher-dependent.
 */
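/* A worked example of the accounting below: a TLS-type encrypt with
 * AES-128-CBC (block size 16) and HMAC-SHA1 (tag_len 20) on a 100-byte
 * record gives 100 + 20 = 120 bytes, padded up to the next multiple of 16,
 * i.e. dst_len = 128.
 */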
static int cryptodev_get_dst_len(struct crypt_auth_op *caop, struct csession *ses_ptr)
{
	int dst_len = caop->len;

	if (caop->op == COP_DECRYPT)
		return dst_len;

	if (caop->flags & COP_FLAG_AEAD_RK_TYPE)
		return dst_len;

	dst_len += caop->tag_len;

	/* for TLS always add some padding so the total length is rounded to
	 * the cipher block size */
	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE) {
		int bs = ses_ptr->cdata.blocksize;

		dst_len += bs - (dst_len % bs);
	}

	return dst_len;
}

static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	struct crypt_auth_op *caop = &kcaop->caop;
	struct csession *ses_ptr;
	int ret;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		derr(1, "invalid session ID=0x%08X", caop->ses);
		return -EINVAL;
	}

	if (caop->flags & (COP_FLAG_AEAD_TLS_TYPE | COP_FLAG_AEAD_SRTP_TYPE)) {
		if (caop->src != caop->dst) {
			derr(1, "Non-in-place encryption and decryption is inefficient and not implemented");
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (caop->tag_len == 0)
		caop->tag_len = cryptodev_get_tag_len(ses_ptr);

	kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;
	kcaop->dst_len = cryptodev_get_dst_len(caop, ses_ptr);
	kcaop->task = current;
	kcaop->mm = current->mm;

	if (caop->iv) {
		ret = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
		if (unlikely(ret)) {
			derr(1, "error copying IV (%d bytes), copy_from_user returned %d for address %p",
			     kcaop->ivlen, ret, caop->iv);
			ret = -EFAULT;
			goto out_unlock;
		}
	}

	ret = 0;

out_unlock:
	crypto_put_session(ses_ptr);
	return ret;
}

static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	int ret;

	kcaop->caop.len = kcaop->dst_len;

	if (kcaop->ivlen && (kcaop->caop.flags & COP_FLAG_WRITE_IV)) {
		ret = copy_to_user(kcaop->caop.iv,
				   kcaop->iv, kcaop->ivlen);
		if (unlikely(ret)) {
			derr(1, "Error in copying to userspace");
			return -EFAULT;
		}
	}
	return 0;
}


int cryptodev_kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
			      struct fcrypt *fcr, void __user *arg)
{
	if (unlikely(copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop)))) {
		derr(1, "Error in copying from userspace");
		return -EFAULT;
	}

	return fill_kcaop_from_caop(kcaop, fcr);
}

int cryptodev_kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
			    struct fcrypt *fcr, void __user *arg)
{
	int ret;

	ret = fill_caop_from_kcaop(kcaop, fcr);
	if (unlikely(ret)) {
		derr(1, "fill_caop_from_kcaop() failed");
		return ret;
	}

	if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
		derr(1, "Error in copying to userspace");
		return -EFAULT;
	}
	return 0;
}

/* compatibility code for 32bit userlands */
#ifdef CONFIG_COMPAT

static inline void
compat_to_crypt_auth_op(struct compat_crypt_auth_op *compat, struct crypt_auth_op *caop)
{
	caop->ses = compat->ses;
	caop->op = compat->op;
	caop->flags = compat->flags;
	caop->len = compat->len;
	caop->auth_len = compat->auth_len;
	caop->tag_len = compat->tag_len;
	caop->iv_len = compat->iv_len;

	caop->auth_src = compat_ptr(compat->auth_src);
	caop->src = compat_ptr(compat->src);
	caop->dst = compat_ptr(compat->dst);
	caop->tag = compat_ptr(compat->tag);
	caop->iv = compat_ptr(compat->iv);
}

static inline void
crypt_auth_op_to_compat(struct crypt_auth_op *caop, struct compat_crypt_auth_op *compat)
{
	compat->ses = caop->ses;
	compat->op = caop->op;
	compat->flags = caop->flags;
	compat->len = caop->len;
	compat->auth_len = caop->auth_len;
	compat->tag_len = caop->tag_len;
	compat->iv_len = caop->iv_len;

	compat->auth_src = ptr_to_compat(caop->auth_src);
	compat->src = ptr_to_compat(caop->src);
	compat->dst = ptr_to_compat(caop->dst);
	compat->tag = ptr_to_compat(caop->tag);
	compat->iv = ptr_to_compat(caop->iv);
}

int compat_kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
			   struct fcrypt *fcr, void __user *arg)
{
	int ret;
	struct compat_crypt_auth_op compat_auth_cop;

	ret = copy_from_user(&compat_auth_cop, arg, sizeof(compat_auth_cop));
	if (unlikely(ret)) {
		derr(1, "Error in copying from userspace");
		return -EFAULT;
	}

	compat_to_crypt_auth_op(&compat_auth_cop, &kcaop->caop);

	return fill_kcaop_from_caop(kcaop, fcr);
}

int compat_kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
			 struct fcrypt *fcr, void __user *arg)
{
	int ret;
	struct compat_crypt_auth_op compat_auth_cop;

	ret = fill_caop_from_kcaop(kcaop, fcr);
	if (unlikely(ret)) {
		derr(1, "fill_caop_from_kcaop() failed");
		return ret;
	}

	crypt_auth_op_to_compat(&kcaop->caop, &compat_auth_cop);

	if (unlikely(copy_to_user(arg, &compat_auth_cop, sizeof(compat_auth_cop)))) {
		derr(1, "Error in copying to userspace");
		return -EFAULT;
	}
	return 0;
}

#endif /* CONFIG_COMPAT */

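/* Thin wrappers around scatterwalk_map_and_copy(); its last argument is the
 * direction: 1 copies the buffer into the scatterlist, 0 copies out of it.
 */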
static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}

static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}

#define TLS_MAX_PADDING_SIZE 256
static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[TLS_MAX_PADDING_SIZE];
	int pad_size = block_size - (len % block_size);

	memset(pad, pad_size - 1, pad_size);

	scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);

	return pad_size;
}
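/* Example of the TLS CBC padding produced above: for len = 53 and
 * block_size = 16, pad_size = 16 - (53 % 16) = 11, so eleven bytes of value
 * 0x0a (pad_size - 1) are appended, the last of which doubles as the
 * padding-length byte, bringing the record to 64 bytes.
 * verify_tls_record_pad() below undoes this: it reads that length byte,
 * checks the ten bytes preceding it, and returns 11.
 */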

static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[TLS_MAX_PADDING_SIZE];
	uint8_t pad_size;
	int i;

	scatterwalk_map_and_copy(&pad_size, dst_sg, len - 1, 1, 0);

	if (pad_size + 1 > len) {
		derr(1, "Pad size: %d", pad_size);
		return -EBADMSG;
	}

	scatterwalk_map_and_copy(pad, dst_sg, len - pad_size - 1, pad_size + 1, 0);

	for (i = 0; i < pad_size; i++)
		if (pad[i] != pad_size) {
			derr(1, "Pad size: %u, pad: %d", pad_size, pad[i]);
			return -EBADMSG;
		}

	return pad_size + 1;
}

/* Authenticate and encrypt the TLS way (MAC-then-pad-then-encrypt).
 * During decryption it verifies the pad and tag and returns -EBADMSG on error.
 */
static int
tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		 struct scatterlist *auth_sg, uint32_t auth_len,
		 struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* TLS authenticates the plaintext except for the padding.
	 */
	if (caop->op == COP_ENCRYPT) {
		if (ses_ptr->hdata.init != 0) {
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    auth_sg, auth_len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    dst_sg, len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_final: %d", ret);
				return ret;
			}

			copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
			len += caop->tag_len;
		}

		if (ses_ptr->cdata.init != 0) {
			if (ses_ptr->cdata.blocksize > 1) {
				ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
				len += ret;
			}

			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				derr(0, "cryptodev_cipher_encrypt: %d", ret);
				return ret;
			}
		}
	} else {
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);

			if (unlikely(ret)) {
				derr(0, "cryptodev_cipher_decrypt: %d", ret);
				return ret;
			}

			if (ses_ptr->cdata.blocksize > 1) {
				ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
				if (unlikely(ret < 0)) {
					derr(2, "verify_tls_record_pad: %d", ret);
					/* don't return yet; still verify the
					 * MAC so a bad pad and a bad MAC are
					 * indistinguishable to the caller
					 */
					fail = 1;
				} else {
					len -= ret;
				}
			}
		}

		if (ses_ptr->hdata.init != 0) {
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				derr(1, "Illegal tag length");
				return -EINVAL;
			}

			read_tls_hash(dst_sg, len, vhash, caop->tag_len);
			len -= caop->tag_len;

			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    auth_sg, auth_len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    dst_sg, len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_final: %d", ret);
				return ret;
			}

			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				derr(2, "MAC verification failed (tag_len: %d)", caop->tag_len);
				return -EBADMSG;
			}
		}
	}
	kcaop->dst_len = len;
	return 0;
}

/* Authenticate and encrypt the SRTP way (encrypt-then-MAC). During
 * decryption it verifies the tag and returns -EBADMSG on error.
 */
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		  struct scatterlist *auth_sg, uint32_t auth_len,
		  struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* SRTP authenticates the encrypted data.
	 */
	if (caop->op == COP_ENCRYPT) {
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				derr(0, "cryptodev_cipher_encrypt: %d", ret);
				return ret;
			}
		}

		if (ses_ptr->hdata.init != 0) {
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
							    auth_sg, auth_len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_final: %d", ret);
				return ret;
			}

			if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len)))
				return -EFAULT;
		}

	} else {
		if (ses_ptr->hdata.init != 0) {
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				derr(1, "Illegal tag length");
				return -EINVAL;
			}

			if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len)))
				return -EFAULT;

			ret = cryptodev_hash_update(&ses_ptr->hdata,
						    auth_sg, auth_len);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_update: %d", ret);
				return ret;
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_final: %d", ret);
				return ret;
			}

			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				derr(2, "MAC verification failed");
				return -EBADMSG;
			}
		}

		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
						       dst_sg, dst_sg, len);

			if (unlikely(ret)) {
				derr(0, "cryptodev_cipher_decrypt: %d", ret);
				return ret;
			}
		}

	}
	kcaop->dst_len = len;
	return 0;
}

static int rk_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
			   struct scatterlist *auth_sg, uint32_t auth_len,
			   struct scatterlist *src_sg,
			   struct scatterlist *dst_sg, uint32_t len)
{
	int ret;
	struct crypt_auth_op *caop = &kcaop->caop;
	int max_tag_len;

	max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
	if (unlikely(caop->tag_len > max_tag_len)) {
		derr(0, "Illegal tag length: %d", caop->tag_len);
		return -EINVAL;
	}

	if (caop->tag_len)
		cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
	else
		caop->tag_len = max_tag_len;

	cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);

	if (caop->op == COP_ENCRYPT) {
		ret = cryptodev_cipher_encrypt(&ses_ptr->cdata, src_sg, dst_sg, len);
		if (unlikely(ret)) {
			derr(0, "cryptodev_cipher_encrypt: %d", ret);
			return ret;
		}
	} else {
		ret = cryptodev_cipher_decrypt(&ses_ptr->cdata, src_sg, dst_sg, len);
		if (unlikely(ret)) {
			derr(0, "cryptodev_cipher_decrypt: %d", ret);
			return ret;
		}
	}

	return 0;
}

/* Typical AEAD (e.g. GCM) encryption/decryption.
 * During decryption the tag is verified.
 */
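/* On encrypt the destination buffer ends up holding ciphertext || tag, and
 * caop->tag is pointed at the tag's offset inside caop->dst; on decrypt the
 * trailing tag is consumed, so dst_len shrinks by tag_len.
 */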
static int
auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
	     struct scatterlist *auth_sg, uint32_t auth_len,
	     struct scatterlist *src_sg,
	     struct scatterlist *dst_sg, uint32_t len)
{
	int ret;
	struct crypt_auth_op *caop = &kcaop->caop;
	int max_tag_len;

	max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
	if (unlikely(caop->tag_len > max_tag_len)) {
		derr(0, "Illegal tag length: %d", caop->tag_len);
		return -EINVAL;
	}

	if (caop->tag_len)
		cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
	else
		caop->tag_len = max_tag_len;

	cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);

	if (caop->op == COP_ENCRYPT) {
		ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
					       src_sg, dst_sg, len);
		if (unlikely(ret)) {
			derr(0, "cryptodev_cipher_encrypt: %d", ret);
			return ret;
		}
		kcaop->dst_len = len + caop->tag_len;
		caop->tag = caop->dst + len;
	} else {
		ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
					       src_sg, dst_sg, len);

		if (unlikely(ret)) {
			derr(0, "cryptodev_cipher_decrypt: %d", ret);
			return ret;
		}
		kcaop->dst_len = len - caop->tag_len;
		caop->tag = caop->dst + len - caop->tag_len;
	}

	return 0;
}

static int crypto_auth_zc_srtp(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct scatterlist *dst_sg, *auth_sg;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret;

	if (unlikely(ses_ptr->cdata.init != 0 &&
		     (ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0))) {
		derr(0, "Only non-AEAD stream ciphers are allowed in SRTP mode");
		return -EINVAL;
	}

	ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg);
	if (unlikely(ret)) {
		derr(1, "get_userbuf_srtp(): Error getting user pages.");
		return ret;
	}

	ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
				dst_sg, caop->len);

	cryptodev_release_user_pages(ses_ptr);

	return ret;
}

static int crypto_auth_zc_tls(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct crypt_auth_op *caop = &kcaop->caop;
	struct scatterlist *dst_sg, *auth_sg;
	unsigned char *auth_buf = NULL;
	struct scatterlist tmp;
	int ret;

	if (unlikely(caop->auth_len > PAGE_SIZE)) {
		derr(1, "auth data len is excessive.");
		return -EINVAL;
	}

	auth_buf = (unsigned char *)__get_free_page(GFP_KERNEL);
	if (unlikely(!auth_buf)) {
		derr(1, "unable to get a free page.");
		return -ENOMEM;
	}

	if (caop->auth_src && caop->auth_len > 0) {
		if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
			derr(1, "unable to copy auth data from userspace.");
			ret = -EFAULT;
			goto free_auth_buf;
		}

		sg_init_one(&tmp, auth_buf, caop->auth_len);
		auth_sg = &tmp;
	} else {
		auth_sg = NULL;
	}

	ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg);
	if (unlikely(ret)) {
		derr(1, "get_userbuf_tls(): Error getting user pages.");
		goto free_auth_buf;
	}

	ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
			       dst_sg, caop->len);
	cryptodev_release_user_pages(ses_ptr);

free_auth_buf:
	free_page((unsigned long)auth_buf);
	return ret;
}

static int crypto_auth_zc_aead(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct scatterlist *dst_sg;
	struct scatterlist *src_sg;
	struct crypt_auth_op *caop = &kcaop->caop;
	unsigned char *auth_buf = NULL;
	int ret;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
	struct scatterlist tmp;
	struct scatterlist *auth_sg;
#else
	struct scatterlist auth1[2];
	struct scatterlist auth2[2];
#endif

	if (unlikely(ses_ptr->cdata.init == 0 ||
		     (ses_ptr->cdata.stream == 0 && ses_ptr->cdata.aead == 0))) {
		derr(0, "Only stream and AEAD ciphers are allowed for authenc");
		return -EINVAL;
	}

	if (unlikely(caop->auth_len > PAGE_SIZE)) {
		derr(1, "auth data len is excessive.");
		return -EINVAL;
	}

	auth_buf = (unsigned char *)__get_free_page(GFP_KERNEL);
	if (unlikely(!auth_buf)) {
		derr(1, "unable to get a free page.");
		return -ENOMEM;
	}

	ret = cryptodev_get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, kcaop->dst_len,
				    kcaop->task, kcaop->mm, &src_sg, &dst_sg);
	if (unlikely(ret)) {
		derr(1, "cryptodev_get_userbuf(): Error getting user pages.");
		goto free_auth_buf;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
	if (caop->auth_src && caop->auth_len > 0) {
		if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
			derr(1, "unable to copy auth data from userspace.");
			ret = -EFAULT;
			goto free_pages;
		}

		sg_init_one(&tmp, auth_buf, caop->auth_len);
		auth_sg = &tmp;
	} else {
		auth_sg = NULL;
	}

	ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
			   src_sg, dst_sg, caop->len);
#else
	/* Since kernel 4.2 the AEAD API expects the associated data to be
	 * part of the src/dst scatterlists, so chain the AAD buffer in
	 * front of them instead of passing it separately.
	 */
	if (caop->auth_src && caop->auth_len > 0) {
		if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
			derr(1, "unable to copy auth data from userspace.");
			ret = -EFAULT;
			goto free_pages;
		}

		sg_init_table(auth1, 2);
		sg_set_buf(auth1, auth_buf, caop->auth_len);
		sg_chain(auth1, 2, src_sg);

		if (src_sg == dst_sg) {
			src_sg = auth1;
			dst_sg = auth1;
		} else {
			sg_init_table(auth2, 2);
			sg_set_buf(auth2, auth_buf, caop->auth_len);
			sg_chain(auth2, 2, dst_sg);
			src_sg = auth1;
			dst_sg = auth2;
		}
	}

	ret = auth_n_crypt(ses_ptr, kcaop, NULL, caop->auth_len,
			   src_sg, dst_sg, caop->len);
#endif

free_pages:
	cryptodev_release_user_pages(ses_ptr);

free_auth_buf:
	free_page((unsigned long)auth_buf);

	return ret;
}

/* Chain two sglists together: the last entry of prv is duplicated into the
 * first entry of sgl, and (when prv has more than one entry) prv's last
 * slot is then reused as the chain link to sgl.
 */
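/* Roughly (illustrative):
 *
 *	before:  prv: [p0] ... [pN]       sgl: [s0] [s1] ...
 *	after:   prv: [p0] ... [->sgl]    sgl: [pN] [s1] ...
 */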
static struct scatterlist *sg_copy_chain(struct scatterlist *prv,
					 unsigned int prv_nents,
					 struct scatterlist *sgl)
{
	struct scatterlist *sg_tmp = sg_last(prv, prv_nents);

	sg_set_page(sgl, sg_page(sg_tmp), sg_tmp->length, sg_tmp->offset);

	if (prv_nents > 1) {
		sg_chain(prv, prv_nents, sgl);
		return prv;
	} else {
		return sgl;
	}
}

static int crypto_auth_zc_rk(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct scatterlist *dst;
	struct scatterlist *src;
	struct scatterlist *dst_sg;
	struct scatterlist *src_sg;
	struct crypt_auth_op *caop = &kcaop->caop;
	unsigned char *auth_buf = NULL, *tag_buf = NULL;
	struct scatterlist auth_src[2], auth_dst[2], tag[3];
	int ret;

	if (unlikely(ses_ptr->cdata.init == 0 ||
		     (ses_ptr->cdata.stream == 0 && ses_ptr->cdata.aead == 0))) {
		derr(0, "Only stream and AEAD ciphers are allowed for authenc");
		return -EINVAL;
	}

	if (unlikely(caop->auth_len > PAGE_SIZE)) {
		derr(1, "auth data len is excessive.");
		return -EINVAL;
	}

	ret = cryptodev_get_userbuf(ses_ptr, caop->src, caop->len,
				    caop->dst, kcaop->dst_len,
				    kcaop->task, kcaop->mm, &src_sg, &dst_sg);
	if (unlikely(ret)) {
		derr(1, "cryptodev_get_userbuf(): Error getting user pages.");
		goto exit;
	}

	dst = dst_sg;
	src = src_sg;

	/* chain tag */
	if (caop->tag && caop->tag_len > 0) {
		tag_buf = kcalloc(caop->tag_len, sizeof(*tag_buf), GFP_KERNEL);
		if (unlikely(!tag_buf)) {
			derr(1, "unable to kcalloc %d bytes.", caop->tag_len);
			ret = -ENOMEM;
			goto free_pages;
		}

		if (unlikely(copy_from_user(tag_buf, caop->tag, caop->tag_len))) {
			derr(1, "unable to copy tag data from userspace.");
			ret = -EFAULT;
			goto free_pages;
		}

		sg_init_table(tag, ARRAY_SIZE(tag));
		sg_set_buf(&tag[1], tag_buf, caop->tag_len);

		/* Since sg_chain() requires the last sg in the list to be
		 * empty and used for link information, we cannot link
		 * src_sg/dst_sg to the tag directly.
		 */
		if (caop->op == COP_ENCRYPT)
			dst = sg_copy_chain(dst_sg, sg_nents(dst_sg), tag);
		else
			src = sg_copy_chain(src_sg, sg_nents(src_sg), tag);
	}

	/* chain auth */
	auth_buf = (unsigned char *)__get_free_page(GFP_KERNEL);
	if (unlikely(!auth_buf)) {
		derr(1, "unable to get a free page.");
		ret = -ENOMEM;
		goto free_pages;
	}

	if (caop->auth_src && caop->auth_len > 0) {
		if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
			derr(1, "unable to copy auth data from userspace.");
			ret = -EFAULT;
			goto free_pages;
		}

		sg_init_table(auth_src, ARRAY_SIZE(auth_src));
		sg_set_buf(auth_src, auth_buf, caop->auth_len);
		sg_init_table(auth_dst, ARRAY_SIZE(auth_dst));
		sg_set_buf(auth_dst, auth_buf, caop->auth_len);

		sg_chain(auth_src, 2, src);
		sg_chain(auth_dst, 2, dst);
		src = auth_src;
		dst = auth_dst;
	}

	if (caop->op == COP_ENCRYPT)
		ret = rk_auth_n_crypt(ses_ptr, kcaop, NULL, caop->auth_len,
				      src, dst, caop->len);
	else
		ret = rk_auth_n_crypt(ses_ptr, kcaop, NULL, caop->auth_len,
				      src, dst, caop->len + caop->tag_len);

	if (!ret && caop->op == COP_ENCRYPT && tag_buf) {
		if (unlikely(copy_to_user(kcaop->caop.tag, tag_buf, caop->tag_len))) {
			derr(1, "Error in copying to userspace");
			ret = -EFAULT;
			goto free_pages;
		}
	}

free_pages:
	cryptodev_release_user_pages(ses_ptr);

exit:
	if (auth_buf)
		free_page((unsigned long)auth_buf);

	kfree(tag_buf);

	return ret;
}

static int
__crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
{
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret;

	if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
		ret = crypto_auth_zc_srtp(ses_ptr, kcaop);
	} else if (caop->flags & COP_FLAG_AEAD_TLS_TYPE &&
		   ses_ptr->cdata.aead == 0) {
		ret = crypto_auth_zc_tls(ses_ptr, kcaop);
	} else if (caop->flags & COP_FLAG_AEAD_RK_TYPE &&
		   ses_ptr->cdata.aead) {
		ret = crypto_auth_zc_rk(ses_ptr, kcaop);
	} else if (ses_ptr->cdata.aead) {
		ret = crypto_auth_zc_aead(ses_ptr, kcaop);
	} else {
		ret = -EINVAL;
	}

	return ret;
}


int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
{
	struct csession *ses_ptr;
	struct crypt_auth_op *caop = &kcaop->caop;
	int ret;

	if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
		ddebug(1, "invalid operation op=%u", caop->op);
		return -EINVAL;
	}

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		derr(1, "invalid session ID=0x%08X", caop->ses);
		return -EINVAL;
	}

	if (unlikely(ses_ptr->cdata.init == 0)) {
		derr(1, "cipher context not initialized");
		ret = -EINVAL;
		goto out_unlock;
	}

	/* If we have a hash/mac handle reset its state */
	if (ses_ptr->hdata.init != 0) {
		ret = cryptodev_hash_reset(&ses_ptr->hdata);
		if (unlikely(ret)) {
			derr(1, "error in cryptodev_hash_reset()");
			goto out_unlock;
		}
	}

	cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
				min(ses_ptr->cdata.ivsize, kcaop->ivlen));

	ret = __crypto_auth_run_zc(ses_ptr, kcaop);
	if (unlikely(ret)) {
		derr(1, "error in __crypto_auth_run_zc()");
		goto out_unlock;
	}

	ret = 0;

	cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
				min(ses_ptr->cdata.ivsize, kcaop->ivlen));

out_unlock:
	crypto_put_session(ses_ptr);
	return ret;
}