xref: /OK3568_Linux_fs/kernel/drivers/crypto/rockchip/cryptodev_linux/authenc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Driver for /dev/crypto device (aka CryptoDev)
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Author: Nikos Mavrogiannopoulos
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * This file is part of linux cryptodev.
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or
11*4882a593Smuzhiyun  * modify it under the terms of the GNU General Public License
12*4882a593Smuzhiyun  * as published by the Free Software Foundation; either version 2
13*4882a593Smuzhiyun  * of the License, or (at your option) any later version.
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * This program is distributed in the hope that it will be useful,
16*4882a593Smuzhiyun  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17*4882a593Smuzhiyun  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18*4882a593Smuzhiyun  * GNU General Public License for more details.
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  * You should have received a copy of the GNU General Public License
21*4882a593Smuzhiyun  * along with this program; if not, write to the Free Software
22*4882a593Smuzhiyun  * Foundation, Inc.,
23*4882a593Smuzhiyun  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
24*4882a593Smuzhiyun  */
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun /*
27*4882a593Smuzhiyun  * This file handles the AEAD part of /dev/crypto.
28*4882a593Smuzhiyun  *
29*4882a593Smuzhiyun  */
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #include <crypto/hash.h>
32*4882a593Smuzhiyun #include <linux/crypto.h>
33*4882a593Smuzhiyun #include <linux/mm.h>
34*4882a593Smuzhiyun #include <linux/highmem.h>
35*4882a593Smuzhiyun #include <linux/ioctl.h>
36*4882a593Smuzhiyun #include <linux/random.h>
37*4882a593Smuzhiyun #include <linux/syscalls.h>
38*4882a593Smuzhiyun #include <linux/pagemap.h>
39*4882a593Smuzhiyun #include <linux/poll.h>
40*4882a593Smuzhiyun #include <linux/uaccess.h>
41*4882a593Smuzhiyun #include <crypto/scatterwalk.h>
42*4882a593Smuzhiyun #include <linux/scatterlist.h>
43*4882a593Smuzhiyun #include "cryptodev.h"
44*4882a593Smuzhiyun #include "zc.h"
45*4882a593Smuzhiyun #include "util.h"
46*4882a593Smuzhiyun #include "cryptlib.h"
47*4882a593Smuzhiyun #include "version.h"
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun /* make caop->dst available in scatterlist.
51*4882a593Smuzhiyun  * (caop->src is assumed to be equal to caop->dst)
52*4882a593Smuzhiyun  */
get_userbuf_tls(struct csession * ses,struct kernel_crypt_auth_op * kcaop,struct scatterlist ** dst_sg)53*4882a593Smuzhiyun static int get_userbuf_tls(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
54*4882a593Smuzhiyun 			struct scatterlist **dst_sg)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun 	int pagecount = 0;
57*4882a593Smuzhiyun 	struct crypt_auth_op *caop = &kcaop->caop;
58*4882a593Smuzhiyun 	int rc;
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 	if (caop->dst == NULL)
61*4882a593Smuzhiyun 		return -EINVAL;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	if (ses->alignmask) {
64*4882a593Smuzhiyun 		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask + 1))
65*4882a593Smuzhiyun 			dwarning(2, "careful - source address %p is not %d byte aligned",
66*4882a593Smuzhiyun 					caop->dst, ses->alignmask + 1);
67*4882a593Smuzhiyun 	}
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	if (kcaop->dst_len == 0) {
70*4882a593Smuzhiyun 		dwarning(1, "Destination length cannot be zero");
71*4882a593Smuzhiyun 		return -EINVAL;
72*4882a593Smuzhiyun 	}
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	pagecount = PAGECOUNT(caop->dst, kcaop->dst_len);
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 	ses->used_pages = pagecount;
77*4882a593Smuzhiyun 	ses->readonly_pages = 0;
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	rc = cryptodev_adjust_sg_array(ses, pagecount);
80*4882a593Smuzhiyun 	if (rc)
81*4882a593Smuzhiyun 		return rc;
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	rc = __cryptodev_get_userbuf(caop->dst, kcaop->dst_len, 1, pagecount,
84*4882a593Smuzhiyun 	                   ses->pages, ses->sg, kcaop->task, kcaop->mm);
85*4882a593Smuzhiyun 	if (unlikely(rc)) {
86*4882a593Smuzhiyun 		derr(1, "failed to get user pages for data input");
87*4882a593Smuzhiyun 		return -EINVAL;
88*4882a593Smuzhiyun 	}
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	(*dst_sg) = ses->sg;
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	return 0;
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun #define MAX_SRTP_AUTH_DATA_DIFF 256
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun /* Makes caop->auth_src available as scatterlist.
99*4882a593Smuzhiyun  * It also provides a pointer to caop->dst, which however,
100*4882a593Smuzhiyun  * is assumed to be within the caop->auth_src buffer. If not
101*4882a593Smuzhiyun  * (if their difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it
102*4882a593Smuzhiyun  * returns error.
103*4882a593Smuzhiyun  */
/* Makes caop->auth_src available as scatterlist.
 * It also provides a pointer to caop->dst, which however,
 * is assumed to be within the caop->auth_src buffer. If not
 * (if their difference exceeds MAX_SRTP_AUTH_DATA_DIFF) it
 * returns error.
 *
 * On success *auth_sg covers the whole auth_src buffer and *dst_sg is a
 * second scatterlist view over the same pinned pages, advanced by the
 * src-auth_src offset.  Returns 0 or a negative errno.
 */
static int get_userbuf_srtp(struct csession *ses, struct kernel_crypt_auth_op *kcaop,
			struct scatterlist **auth_sg, struct scatterlist **dst_sg)
{
	int pagecount, diff;
	int auth_pagecount = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	int rc;

	if (caop->dst == NULL && caop->auth_src == NULL) {
		derr(1, "dst and auth_src cannot be both null");
		return -EINVAL;
	}

	if (ses->alignmask) {
		/* Misalignment is tolerated (warning only); the cipher layer
		 * presumably copes — these messages are diagnostics. */
		if (!IS_ALIGNED((unsigned long)caop->dst, ses->alignmask + 1))
			dwarning(2, "careful - source address %p is not %d byte aligned",
					caop->dst, ses->alignmask + 1);
		if (!IS_ALIGNED((unsigned long)caop->auth_src, ses->alignmask + 1))
			dwarning(2, "careful - source address %p is not %d byte aligned",
					caop->auth_src, ses->alignmask + 1);
	}

	if (unlikely(kcaop->dst_len == 0 || caop->auth_len == 0)) {
		dwarning(1, "Destination length cannot be zero");
		return -EINVAL;
	}

	/* Note that in SRTP auth data overlap with data to be encrypted (dst)
         */

	auth_pagecount = PAGECOUNT(caop->auth_src, caop->auth_len);
	/* src must start within MAX_SRTP_AUTH_DATA_DIFF bytes after
	 * auth_src; the offset is re-applied below with sg_advance. */
	diff = (int)(caop->src - caop->auth_src);
	if (diff > MAX_SRTP_AUTH_DATA_DIFF || diff < 0) {
		dwarning(1, "auth_src must overlap with src (diff: %d).", diff);
		return -EINVAL;
	}

	pagecount = auth_pagecount;

	rc = cryptodev_adjust_sg_array(ses, pagecount*2); /* double pages to have pages for dst(=auth_src) */
	if (rc) {
		derr(1, "cannot adjust sg array");
		return rc;
	}

	/* write access (1): the overlapping dst region is modified in place */
	rc = __cryptodev_get_userbuf(caop->auth_src, caop->auth_len, 1, auth_pagecount,
			   ses->pages, ses->sg, kcaop->task, kcaop->mm);
	if (unlikely(rc)) {
		derr(1, "failed to get user pages for data input");
		return -EINVAL;
	}

	ses->used_pages = pagecount;
	ses->readonly_pages = 0;

	(*auth_sg) = ses->sg;

	/* Build a second sg view (upper half of the array) over the same
	 * pages, then advance it so it starts at caop->src. */
	(*dst_sg) = ses->sg + auth_pagecount;
	sg_init_table(*dst_sg, auth_pagecount);
	cryptodev_sg_copy(ses->sg, (*dst_sg), caop->auth_len);
	(*dst_sg) = cryptodev_sg_advance(*dst_sg, diff);
	if (*dst_sg == NULL) {
		/* advancing past the end of the mapped region — unpin and bail */
		cryptodev_release_user_pages(ses);
		derr(1, "failed to get enough pages for auth data");
		return -EINVAL;
	}

	return 0;
}
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun /*
175*4882a593Smuzhiyun  * Return tag (digest) length for authenticated encryption
176*4882a593Smuzhiyun  * If the cipher and digest are separate, hdata.init is set - just return
177*4882a593Smuzhiyun  * digest length. Otherwise return digest length for aead ciphers
178*4882a593Smuzhiyun  */
cryptodev_get_tag_len(struct csession * ses_ptr)179*4882a593Smuzhiyun static int cryptodev_get_tag_len(struct csession *ses_ptr)
180*4882a593Smuzhiyun {
181*4882a593Smuzhiyun 	if (ses_ptr->hdata.init)
182*4882a593Smuzhiyun 		return ses_ptr->hdata.digestsize;
183*4882a593Smuzhiyun 	else
184*4882a593Smuzhiyun 		return cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun /*
188*4882a593Smuzhiyun  * Calculate destination buffer length for authenticated encryption. The
189*4882a593Smuzhiyun  * expectation is that user-space code allocates exactly the same space for
190*4882a593Smuzhiyun  * destination buffer before calling cryptodev. The result is cipher-dependent.
191*4882a593Smuzhiyun  */
cryptodev_get_dst_len(struct crypt_auth_op * caop,struct csession * ses_ptr)192*4882a593Smuzhiyun static int cryptodev_get_dst_len(struct crypt_auth_op *caop, struct csession *ses_ptr)
193*4882a593Smuzhiyun {
194*4882a593Smuzhiyun 	int dst_len = caop->len;
195*4882a593Smuzhiyun 	if (caop->op == COP_DECRYPT)
196*4882a593Smuzhiyun 		return dst_len;
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	if (caop->flags & COP_FLAG_AEAD_RK_TYPE)
199*4882a593Smuzhiyun 		return dst_len;
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 	dst_len += caop->tag_len;
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	/* for TLS always add some padding so the total length is rounded to
204*4882a593Smuzhiyun 	 * cipher block size */
205*4882a593Smuzhiyun 	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE) {
206*4882a593Smuzhiyun 		int bs = ses_ptr->cdata.blocksize;
207*4882a593Smuzhiyun 		dst_len += bs - (dst_len % bs);
208*4882a593Smuzhiyun 	}
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 	return dst_len;
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun 
/* Validate the user-supplied crypt_auth_op and derive the kernel-side
 * fields of kcaop: IV copy, expected destination length and the owning
 * task/mm used later for page pinning.  Returns 0 or a negative errno.
 */
static int fill_kcaop_from_caop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
{
	struct crypt_auth_op *caop = &kcaop->caop;
	struct csession *ses_ptr;
	int ret;

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
	if (unlikely(!ses_ptr)) {
		derr(1, "invalid session ID=0x%08X", caop->ses);
		return -EINVAL;
	}

	/* TLS and SRTP modes only work in place: src must alias dst. */
	if (caop->flags & COP_FLAG_AEAD_TLS_TYPE || caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
		if (caop->src != caop->dst) {
			derr(1, "Non-inplace encryption and decryption is not efficient and not implemented");
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	/* Default tag length to the session's digest/AEAD tag size. */
	if (caop->tag_len == 0)
		caop->tag_len = cryptodev_get_tag_len(ses_ptr);

	kcaop->ivlen = caop->iv ? ses_ptr->cdata.ivsize : 0;
	kcaop->dst_len = cryptodev_get_dst_len(caop, ses_ptr);
	kcaop->task = current;
	kcaop->mm = current->mm;

	if (caop->iv) {
		/* NOTE(review): assumes kcaop->iv can hold cdata.ivsize
		 * bytes — presumably bounded at session setup; verify. */
		ret = copy_from_user(kcaop->iv, caop->iv, kcaop->ivlen);
		if (unlikely(ret)) {
			derr(1, "error copying IV (%d bytes), copy_from_user returned %d for address %p",
					kcaop->ivlen, ret, caop->iv);
			ret = -EFAULT;
			goto out_unlock;
		}
	}

	ret = 0;

out_unlock:
	/* releases ses_ptr->sem taken by crypto_get_session_by_sid() */
	crypto_put_session(ses_ptr);
	return ret;

}
259*4882a593Smuzhiyun 
fill_caop_from_kcaop(struct kernel_crypt_auth_op * kcaop,struct fcrypt * fcr)260*4882a593Smuzhiyun static int fill_caop_from_kcaop(struct kernel_crypt_auth_op *kcaop, struct fcrypt *fcr)
261*4882a593Smuzhiyun {
262*4882a593Smuzhiyun 	int ret;
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 	kcaop->caop.len = kcaop->dst_len;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	if (kcaop->ivlen && kcaop->caop.flags & COP_FLAG_WRITE_IV) {
267*4882a593Smuzhiyun 		ret = copy_to_user(kcaop->caop.iv,
268*4882a593Smuzhiyun 				kcaop->iv, kcaop->ivlen);
269*4882a593Smuzhiyun 		if (unlikely(ret)) {
270*4882a593Smuzhiyun 			derr(1, "Error in copying to userspace");
271*4882a593Smuzhiyun 			return -EFAULT;
272*4882a593Smuzhiyun 		}
273*4882a593Smuzhiyun 	}
274*4882a593Smuzhiyun 	return 0;
275*4882a593Smuzhiyun }
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 
/* Copy a crypt_auth_op from user space into kcaop and finish kernel-side
 * setup.  Returns 0, -EFAULT on a bad user pointer, or the error from
 * fill_kcaop_from_caop().
 */
int cryptodev_kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
			struct fcrypt *fcr, void __user *arg)
{
	int rc;

	rc = copy_from_user(&kcaop->caop, arg, sizeof(kcaop->caop));
	if (unlikely(rc)) {
		derr(1, "Error in copying from userspace");
		return -EFAULT;
	}

	return fill_kcaop_from_caop(kcaop, fcr);
}
288*4882a593Smuzhiyun 
/* Finalize kcaop (length / IV write-back) and copy the crypt_auth_op
 * back out to user space.  Returns 0 or a negative errno.
 */
int cryptodev_kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
		struct fcrypt *fcr, void __user *arg)
{
	int rc = fill_caop_from_kcaop(kcaop, fcr);

	if (unlikely(rc)) {
		derr(1, "fill_caop_from_kcaop");
		return rc;
	}

	if (unlikely(copy_to_user(arg, &kcaop->caop, sizeof(kcaop->caop)))) {
		derr(1, "Error in copying to userspace");
		return -EFAULT;
	}

	return 0;
}
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun /* compatibility code for 32bit userlands */
308*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun static inline void
compat_to_crypt_auth_op(struct compat_crypt_auth_op * compat,struct crypt_auth_op * caop)311*4882a593Smuzhiyun compat_to_crypt_auth_op(struct compat_crypt_auth_op *compat, struct crypt_auth_op *caop)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun 	caop->ses      = compat->ses;
314*4882a593Smuzhiyun 	caop->op       = compat->op;
315*4882a593Smuzhiyun 	caop->flags    = compat->flags;
316*4882a593Smuzhiyun 	caop->len      = compat->len;
317*4882a593Smuzhiyun 	caop->auth_len = compat->auth_len;
318*4882a593Smuzhiyun 	caop->tag_len  = compat->tag_len;
319*4882a593Smuzhiyun 	caop->iv_len   = compat->iv_len;
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	caop->auth_src = compat_ptr(compat->auth_src);
322*4882a593Smuzhiyun 	caop->src      = compat_ptr(compat->src);
323*4882a593Smuzhiyun 	caop->dst      = compat_ptr(compat->dst);
324*4882a593Smuzhiyun 	caop->tag      = compat_ptr(compat->tag);
325*4882a593Smuzhiyun 	caop->iv       = compat_ptr(compat->iv);
326*4882a593Smuzhiyun }
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun static inline void
crypt_auth_op_to_compat(struct crypt_auth_op * caop,struct compat_crypt_auth_op * compat)329*4882a593Smuzhiyun crypt_auth_op_to_compat(struct crypt_auth_op *caop, struct compat_crypt_auth_op *compat)
330*4882a593Smuzhiyun {
331*4882a593Smuzhiyun 	compat->ses      = caop->ses;
332*4882a593Smuzhiyun 	compat->op       = caop->op;
333*4882a593Smuzhiyun 	compat->flags    = caop->flags;
334*4882a593Smuzhiyun 	compat->len      = caop->len;
335*4882a593Smuzhiyun 	compat->auth_len = caop->auth_len;
336*4882a593Smuzhiyun 	compat->tag_len  = caop->tag_len;
337*4882a593Smuzhiyun 	compat->iv_len   = caop->iv_len;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	compat->auth_src = ptr_to_compat(caop->auth_src);
340*4882a593Smuzhiyun 	compat->src      = ptr_to_compat(caop->src);
341*4882a593Smuzhiyun 	compat->dst      = ptr_to_compat(caop->dst);
342*4882a593Smuzhiyun 	compat->tag      = ptr_to_compat(caop->tag);
343*4882a593Smuzhiyun 	compat->iv       = ptr_to_compat(caop->iv);
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun 
/* 32-bit compat entry point: read a compat_crypt_auth_op from user
 * space, convert it to the native layout and finish kernel-side setup.
 */
int compat_kcaop_from_user(struct kernel_crypt_auth_op *kcaop,
			   struct fcrypt *fcr, void __user *arg)
{
	struct compat_crypt_auth_op caop32;

	if (unlikely(copy_from_user(&caop32, arg, sizeof(caop32)))) {
		derr(1, "Error in copying from userspace");
		return -EFAULT;
	}

	compat_to_crypt_auth_op(&caop32, &kcaop->caop);

	return fill_kcaop_from_caop(kcaop, fcr);
}
362*4882a593Smuzhiyun 
/* 32-bit compat exit point: finalize kcaop, convert to the compat
 * layout and copy the result back out to user space.
 */
int compat_kcaop_to_user(struct kernel_crypt_auth_op *kcaop,
			 struct fcrypt *fcr, void __user *arg)
{
	struct compat_crypt_auth_op caop32;
	int rc = fill_caop_from_kcaop(kcaop, fcr);

	if (unlikely(rc)) {
		derr(1, "fill_caop_from_kcaop");
		return rc;
	}

	crypt_auth_op_to_compat(&kcaop->caop, &caop32);

	if (unlikely(copy_to_user(arg, &caop32, sizeof(caop32)))) {
		derr(1, "Error in copying to userspace");
		return -EFAULT;
	}

	return 0;
}
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun #endif /* CONFIG_COMPAT */
385*4882a593Smuzhiyun 
/* Append the computed MAC (hash_len bytes) to the record: written at
 * offset 'len' into dst_sg (final argument 1 == write direction). */
static void copy_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len, hash_len, 1);
}
390*4882a593Smuzhiyun 
/* Extract the trailing MAC from a record: the last hash_len bytes of
 * the len-byte payload (final argument 0 == read direction). */
static void read_tls_hash(struct scatterlist *dst_sg, int len, void *hash, int hash_len)
{
	scatterwalk_map_and_copy(hash, dst_sg, len - hash_len, hash_len, 0);
}
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun #define TLS_MAX_PADDING_SIZE 256
pad_record(struct scatterlist * dst_sg,int len,int block_size)397*4882a593Smuzhiyun static int pad_record(struct scatterlist *dst_sg, int len, int block_size)
398*4882a593Smuzhiyun {
399*4882a593Smuzhiyun 	uint8_t pad[TLS_MAX_PADDING_SIZE];
400*4882a593Smuzhiyun 	int pad_size = block_size - (len % block_size);
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	memset(pad, pad_size - 1, pad_size);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	scatterwalk_map_and_copy(pad, dst_sg, len, pad_size, 1);
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	return pad_size;
407*4882a593Smuzhiyun }
408*4882a593Smuzhiyun 
/* Verify TLS CBC padding at the tail of a decrypted record.  The last
 * byte holds the pad length N; the N bytes before it must each equal N.
 * Returns the number of trailing bytes to strip (N + 1), or -EBADMSG.
 * block_size is currently unused here.
 */
static int verify_tls_record_pad(struct scatterlist *dst_sg, int len, int block_size)
{
	uint8_t pad[TLS_MAX_PADDING_SIZE];
	uint8_t pad_size;
	int i;

	/* read the pad-length byte (last byte of the record) */
	scatterwalk_map_and_copy(&pad_size, dst_sg, len - 1, 1, 0);

	/* pad plus its length byte cannot exceed the record */
	if (pad_size + 1 > len) {
		derr(1, "Pad size: %d", pad_size);
		return -EBADMSG;
	}

	/* fetch the padding bytes plus the length byte */
	scatterwalk_map_and_copy(pad, dst_sg, len - pad_size - 1, pad_size + 1, 0);

	/* NOTE(review): this comparison is not constant-time and returns on
	 * the first mismatch — a potential padding-oracle timing signal. */
	for (i = 0; i < pad_size; i++)
		if (pad[i] != pad_size) {
			derr(1, "Pad size: %u, pad: %d", pad_size, pad[i]);
			return -EBADMSG;
		}

	return pad_size + 1;
}
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun /* Authenticate and encrypt the TLS way (also perform padding).
434*4882a593Smuzhiyun  * During decryption it verifies the pad and tag and returns -EBADMSG on error.
435*4882a593Smuzhiyun  */
/* Authenticate and encrypt the TLS way (also perform padding).
 * During decryption it verifies the pad and tag and returns -EBADMSG on error.
 *
 * auth_sg/auth_len: additional authenticated data (e.g. record header).
 * dst_sg/len: payload processed in place.  On success kcaop->dst_len is
 * updated to the final payload length (ciphertext incl. MAC and padding
 * after encrypt; plaintext after decrypt).
 */
static int
tls_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		 struct scatterlist *auth_sg, uint32_t auth_len,
		 struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* TLS authenticates the plaintext except for the padding.
	 */
	if (caop->op == COP_ENCRYPT) {
		/* Encrypt path: MAC, then pad, then encrypt. */
		if (ses_ptr->hdata.init != 0) {
			/* MAC over additional data followed by the plaintext */
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
								auth_sg, auth_len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
								dst_sg, len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_final: %d", ret);
				return ret;
			}

			/* append the MAC to the record */
			copy_tls_hash(dst_sg, len, hash_output, caop->tag_len);
			len += caop->tag_len;
		}

		if (ses_ptr->cdata.init != 0) {
			/* block ciphers require TLS CBC padding before encrypting */
			if (ses_ptr->cdata.blocksize > 1) {
				ret = pad_record(dst_sg, len, ses_ptr->cdata.blocksize);
				len += ret;
			}

			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				derr(0, "cryptodev_cipher_encrypt: %d", ret);
				return ret;
			}
		}
	} else {
		/* Decrypt path: decrypt, strip padding, then verify the MAC. */
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);

			if (unlikely(ret)) {
				derr(0, "cryptodev_cipher_decrypt: %d", ret);
				return ret;
			}

			if (ses_ptr->cdata.blocksize > 1) {
				ret = verify_tls_record_pad(dst_sg, len, ses_ptr->cdata.blocksize);
				if (unlikely(ret < 0)) {
					/* remember the failure but continue so the
					 * MAC is still computed before rejecting */
					derr(2, "verify_record_pad: %d", ret);
					fail = 1;
				} else {
					len -= ret;
				}
			}
		}

		if (ses_ptr->hdata.init != 0) {
			/* tag must fit both the local buffer and the payload */
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				derr(1, "Illegal tag len size");
				return -EINVAL;
			}

			read_tls_hash(dst_sg, len, vhash, caop->tag_len);
			len -= caop->tag_len;

			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
								auth_sg, auth_len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			if (len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
									dst_sg, len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_final: %d", ret);
				return ret;
			}

			/* NOTE(review): memcmp is not a constant-time compare */
			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				derr(2, "MAC verification failed (tag_len: %d)", caop->tag_len);
				return -EBADMSG;
			}
		}
	}
	kcaop->dst_len = len;
	return 0;
}
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun /* Authenticate and encrypt the SRTP way. During decryption
556*4882a593Smuzhiyun  * it verifies the tag and returns -EBADMSG on error.
557*4882a593Smuzhiyun  */
/* Authenticate and encrypt the SRTP way. During decryption
 * it verifies the tag and returns -EBADMSG on error.
 *
 * Unlike TLS, SRTP authenticates the ciphertext (encrypt-then-MAC) and
 * the tag travels out-of-band via caop->tag in user space rather than
 * appended to the payload.  Payload is processed in place in dst_sg.
 */
static int
srtp_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
		  struct scatterlist *auth_sg, uint32_t auth_len,
		  struct scatterlist *dst_sg, uint32_t len)
{
	int ret, fail = 0;
	struct crypt_auth_op *caop = &kcaop->caop;
	uint8_t vhash[AALG_MAX_RESULT_LEN];
	uint8_t hash_output[AALG_MAX_RESULT_LEN];

	/* SRTP authenticates the encrypted data.
	 */
	if (caop->op == COP_ENCRYPT) {
		/* Encrypt first, then MAC over the ciphertext. */
		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);
			if (unlikely(ret)) {
				derr(0, "cryptodev_cipher_encrypt: %d", ret);
				return ret;
			}
		}

		if (ses_ptr->hdata.init != 0) {
			if (auth_len > 0) {
				ret = cryptodev_hash_update(&ses_ptr->hdata,
								auth_sg, auth_len);
				if (unlikely(ret)) {
					derr(0, "cryptodev_hash_update: %d", ret);
					return ret;
				}
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_final: %d", ret);
				return ret;
			}

			/* the tag is returned out-of-band to user space */
			if (unlikely(copy_to_user(caop->tag, hash_output, caop->tag_len)))
				return -EFAULT;
		}

	} else {
		/* Verify the MAC first, then decrypt. */
		if (ses_ptr->hdata.init != 0) {
			/* tag must fit both the local buffer and the payload */
			if (unlikely(caop->tag_len > sizeof(vhash) || caop->tag_len > len)) {
				derr(1, "Illegal tag len size");
				return -EINVAL;
			}

			/* expected tag is supplied out-of-band by user space */
			if (unlikely(copy_from_user(vhash, caop->tag, caop->tag_len)))
				return -EFAULT;

			ret = cryptodev_hash_update(&ses_ptr->hdata,
							auth_sg, auth_len);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_update: %d", ret);
				return ret;
			}

			ret = cryptodev_hash_final(&ses_ptr->hdata, hash_output);
			if (unlikely(ret)) {
				derr(0, "cryptodev_hash_final: %d", ret);
				return ret;
			}

			/* NOTE(review): memcmp is not a constant-time compare;
			 * 'fail' is never set on this path. */
			if (memcmp(vhash, hash_output, caop->tag_len) != 0 || fail != 0) {
				derr(2, "MAC verification failed");
				return -EBADMSG;
			}
		}

		if (ses_ptr->cdata.init != 0) {
			ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
							dst_sg, dst_sg, len);

			if (unlikely(ret)) {
				derr(0, "cryptodev_cipher_decrypt: %d", ret);
				return ret;
			}
		}

	}
	kcaop->dst_len = len;
	return 0;
}
643*4882a593Smuzhiyun 
/* Rockchip variant of the AEAD cipher invocation: validates/sets the tag
 * size, registers the AAD with the transform and runs the cipher in the
 * direction requested by caop->op.  Returns 0 or a negative error.
 */
static int rk_auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
			   struct scatterlist *auth_sg, uint32_t auth_len,
			   struct scatterlist *src_sg,
			   struct scatterlist *dst_sg, uint32_t len)
{
	struct crypt_auth_op *caop = &kcaop->caop;
	int max_tag_len;
	int rc;

	/* Reject tags longer than the transform can produce. */
	max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
	if (unlikely(caop->tag_len > max_tag_len)) {
		derr(0, "Illegal tag length: %d", caop->tag_len);
		return -EINVAL;
	}

	/* A zero tag_len selects the transform's default tag size. */
	if (caop->tag_len == 0)
		caop->tag_len = max_tag_len;
	else
		cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);

	/* Hand the AAD to the transform before running the cipher. */
	cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);

	if (caop->op == COP_ENCRYPT) {
		rc = cryptodev_cipher_encrypt(&ses_ptr->cdata, src_sg, dst_sg, len);
		if (unlikely(rc))
			derr(0, "cryptodev_cipher_encrypt: %d", rc);
	} else {
		rc = cryptodev_cipher_decrypt(&ses_ptr->cdata, src_sg, dst_sg, len);
		if (unlikely(rc))
			derr(0, "cryptodev_cipher_decrypt: %d", rc);
	}

	return rc;
}
682*4882a593Smuzhiyun 
683*4882a593Smuzhiyun /* Typical AEAD (i.e. GCM) encryption/decryption.
684*4882a593Smuzhiyun  * During decryption the tag is verified.
685*4882a593Smuzhiyun  */
686*4882a593Smuzhiyun static int
auth_n_crypt(struct csession * ses_ptr,struct kernel_crypt_auth_op * kcaop,struct scatterlist * auth_sg,uint32_t auth_len,struct scatterlist * src_sg,struct scatterlist * dst_sg,uint32_t len)687*4882a593Smuzhiyun auth_n_crypt(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop,
688*4882a593Smuzhiyun 		  struct scatterlist *auth_sg, uint32_t auth_len,
689*4882a593Smuzhiyun 		  struct scatterlist *src_sg,
690*4882a593Smuzhiyun 		  struct scatterlist *dst_sg, uint32_t len)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	int ret;
693*4882a593Smuzhiyun 	struct crypt_auth_op *caop = &kcaop->caop;
694*4882a593Smuzhiyun 	int max_tag_len;
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	max_tag_len = cryptodev_cipher_get_tag_size(&ses_ptr->cdata);
697*4882a593Smuzhiyun 	if (unlikely(caop->tag_len > max_tag_len)) {
698*4882a593Smuzhiyun 		derr(0, "Illegal tag length: %d", caop->tag_len);
699*4882a593Smuzhiyun 		return -EINVAL;
700*4882a593Smuzhiyun 	}
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun 	if (caop->tag_len)
703*4882a593Smuzhiyun 		cryptodev_cipher_set_tag_size(&ses_ptr->cdata, caop->tag_len);
704*4882a593Smuzhiyun 	else
705*4882a593Smuzhiyun 		caop->tag_len = max_tag_len;
706*4882a593Smuzhiyun 
707*4882a593Smuzhiyun 	cryptodev_cipher_auth(&ses_ptr->cdata, auth_sg, auth_len);
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	if (caop->op == COP_ENCRYPT) {
710*4882a593Smuzhiyun 		ret = cryptodev_cipher_encrypt(&ses_ptr->cdata,
711*4882a593Smuzhiyun 						src_sg, dst_sg, len);
712*4882a593Smuzhiyun 		if (unlikely(ret)) {
713*4882a593Smuzhiyun 			derr(0, "cryptodev_cipher_encrypt: %d", ret);
714*4882a593Smuzhiyun 			return ret;
715*4882a593Smuzhiyun 		}
716*4882a593Smuzhiyun 		kcaop->dst_len = len + caop->tag_len;
717*4882a593Smuzhiyun 		caop->tag = caop->dst + len;
718*4882a593Smuzhiyun 	} else {
719*4882a593Smuzhiyun 		ret = cryptodev_cipher_decrypt(&ses_ptr->cdata,
720*4882a593Smuzhiyun 						src_sg, dst_sg, len);
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 		if (unlikely(ret)) {
723*4882a593Smuzhiyun 			derr(0, "cryptodev_cipher_decrypt: %d", ret);
724*4882a593Smuzhiyun 			return ret;
725*4882a593Smuzhiyun 		}
726*4882a593Smuzhiyun 		kcaop->dst_len = len - caop->tag_len;
727*4882a593Smuzhiyun 		caop->tag = caop->dst + len - caop->tag_len;
728*4882a593Smuzhiyun 	}
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	return 0;
731*4882a593Smuzhiyun }
732*4882a593Smuzhiyun 
crypto_auth_zc_srtp(struct csession * ses_ptr,struct kernel_crypt_auth_op * kcaop)733*4882a593Smuzhiyun static int crypto_auth_zc_srtp(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
734*4882a593Smuzhiyun {
735*4882a593Smuzhiyun 	struct scatterlist *dst_sg, *auth_sg;
736*4882a593Smuzhiyun 	struct crypt_auth_op *caop = &kcaop->caop;
737*4882a593Smuzhiyun 	int ret;
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun 	if (unlikely(ses_ptr->cdata.init != 0 &&
740*4882a593Smuzhiyun 		(ses_ptr->cdata.stream == 0 || ses_ptr->cdata.aead != 0))) {
741*4882a593Smuzhiyun 		derr(0, "Only stream modes are allowed in SRTP mode (but not AEAD)");
742*4882a593Smuzhiyun 		return -EINVAL;
743*4882a593Smuzhiyun 	}
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	ret = get_userbuf_srtp(ses_ptr, kcaop, &auth_sg, &dst_sg);
746*4882a593Smuzhiyun 	if (unlikely(ret)) {
747*4882a593Smuzhiyun 		derr(1, "get_userbuf_srtp(): Error getting user pages.");
748*4882a593Smuzhiyun 		return ret;
749*4882a593Smuzhiyun 	}
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	ret = srtp_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
752*4882a593Smuzhiyun 			dst_sg, caop->len);
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 	cryptodev_release_user_pages(ses_ptr);
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun 	return ret;
757*4882a593Smuzhiyun }
758*4882a593Smuzhiyun 
crypto_auth_zc_tls(struct csession * ses_ptr,struct kernel_crypt_auth_op * kcaop)759*4882a593Smuzhiyun static int crypto_auth_zc_tls(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
760*4882a593Smuzhiyun {
761*4882a593Smuzhiyun 	struct crypt_auth_op *caop = &kcaop->caop;
762*4882a593Smuzhiyun 	struct scatterlist *dst_sg, *auth_sg;
763*4882a593Smuzhiyun 	unsigned char *auth_buf = NULL;
764*4882a593Smuzhiyun 	struct scatterlist tmp;
765*4882a593Smuzhiyun 	int ret;
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 	if (unlikely(caop->auth_len > PAGE_SIZE)) {
768*4882a593Smuzhiyun 		derr(1, "auth data len is excessive.");
769*4882a593Smuzhiyun 		return -EINVAL;
770*4882a593Smuzhiyun 	}
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun 	auth_buf = (char *)__get_free_page(GFP_KERNEL);
773*4882a593Smuzhiyun 	if (unlikely(!auth_buf)) {
774*4882a593Smuzhiyun 		derr(1, "unable to get a free page.");
775*4882a593Smuzhiyun 		return -ENOMEM;
776*4882a593Smuzhiyun 	}
777*4882a593Smuzhiyun 
778*4882a593Smuzhiyun 	if (caop->auth_src && caop->auth_len > 0) {
779*4882a593Smuzhiyun 		if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
780*4882a593Smuzhiyun 			derr(1, "unable to copy auth data from userspace.");
781*4882a593Smuzhiyun 			ret = -EFAULT;
782*4882a593Smuzhiyun 			goto free_auth_buf;
783*4882a593Smuzhiyun 		}
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 		sg_init_one(&tmp, auth_buf, caop->auth_len);
786*4882a593Smuzhiyun 		auth_sg = &tmp;
787*4882a593Smuzhiyun 	} else {
788*4882a593Smuzhiyun 		auth_sg = NULL;
789*4882a593Smuzhiyun 	}
790*4882a593Smuzhiyun 
791*4882a593Smuzhiyun 	ret = get_userbuf_tls(ses_ptr, kcaop, &dst_sg);
792*4882a593Smuzhiyun 	if (unlikely(ret)) {
793*4882a593Smuzhiyun 		derr(1, "get_userbuf_tls(): Error getting user pages.");
794*4882a593Smuzhiyun 		goto free_auth_buf;
795*4882a593Smuzhiyun 	}
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun 	ret = tls_auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
798*4882a593Smuzhiyun 			dst_sg, caop->len);
799*4882a593Smuzhiyun 	cryptodev_release_user_pages(ses_ptr);
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun free_auth_buf:
802*4882a593Smuzhiyun 	free_page((unsigned long)auth_buf);
803*4882a593Smuzhiyun 	return ret;
804*4882a593Smuzhiyun }
805*4882a593Smuzhiyun 
crypto_auth_zc_aead(struct csession * ses_ptr,struct kernel_crypt_auth_op * kcaop)806*4882a593Smuzhiyun static int crypto_auth_zc_aead(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
807*4882a593Smuzhiyun {
808*4882a593Smuzhiyun 	struct scatterlist *dst_sg;
809*4882a593Smuzhiyun 	struct scatterlist *src_sg;
810*4882a593Smuzhiyun 	struct crypt_auth_op *caop = &kcaop->caop;
811*4882a593Smuzhiyun 	unsigned char *auth_buf = NULL;
812*4882a593Smuzhiyun 	int ret;
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
815*4882a593Smuzhiyun 	struct scatterlist tmp;
816*4882a593Smuzhiyun 	struct scatterlist *auth_sg;
817*4882a593Smuzhiyun #else
818*4882a593Smuzhiyun 	struct scatterlist auth1[2];
819*4882a593Smuzhiyun 	struct scatterlist auth2[2];
820*4882a593Smuzhiyun #endif
821*4882a593Smuzhiyun 
822*4882a593Smuzhiyun 	if (unlikely(ses_ptr->cdata.init == 0 ||
823*4882a593Smuzhiyun 		(ses_ptr->cdata.stream == 0 && ses_ptr->cdata.aead == 0))) {
824*4882a593Smuzhiyun 		derr(0, "Only stream and AEAD ciphers are allowed for authenc");
825*4882a593Smuzhiyun 		return -EINVAL;
826*4882a593Smuzhiyun 	}
827*4882a593Smuzhiyun 
828*4882a593Smuzhiyun 	if (unlikely(caop->auth_len > PAGE_SIZE)) {
829*4882a593Smuzhiyun 		derr(1, "auth data len is excessive.");
830*4882a593Smuzhiyun 		return -EINVAL;
831*4882a593Smuzhiyun 	}
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 	auth_buf = (char *)__get_free_page(GFP_KERNEL);
834*4882a593Smuzhiyun 	if (unlikely(!auth_buf)) {
835*4882a593Smuzhiyun 		derr(1, "unable to get a free page.");
836*4882a593Smuzhiyun 		return -ENOMEM;
837*4882a593Smuzhiyun 	}
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun 	ret = cryptodev_get_userbuf(ses_ptr, caop->src, caop->len, caop->dst, kcaop->dst_len,
840*4882a593Smuzhiyun 			kcaop->task, kcaop->mm, &src_sg, &dst_sg);
841*4882a593Smuzhiyun 	if (unlikely(ret)) {
842*4882a593Smuzhiyun 		derr(1, "get_userbuf(): Error getting user pages.");
843*4882a593Smuzhiyun 		goto free_auth_buf;
844*4882a593Smuzhiyun 	}
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
847*4882a593Smuzhiyun 	if (caop->auth_src && caop->auth_len > 0) {
848*4882a593Smuzhiyun 		if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
849*4882a593Smuzhiyun 			derr(1, "unable to copy auth data from userspace.");
850*4882a593Smuzhiyun 			ret = -EFAULT;
851*4882a593Smuzhiyun 			goto free_pages;
852*4882a593Smuzhiyun 		}
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun 		sg_init_one(&tmp, auth_buf, caop->auth_len);
855*4882a593Smuzhiyun 		auth_sg = &tmp;
856*4882a593Smuzhiyun 	} else {
857*4882a593Smuzhiyun 		auth_sg = NULL;
858*4882a593Smuzhiyun 	}
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 	ret = auth_n_crypt(ses_ptr, kcaop, auth_sg, caop->auth_len,
861*4882a593Smuzhiyun 			src_sg, dst_sg, caop->len);
862*4882a593Smuzhiyun #else
863*4882a593Smuzhiyun 	if (caop->auth_src && caop->auth_len > 0) {
864*4882a593Smuzhiyun 		if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
865*4882a593Smuzhiyun 			derr(1, "unable to copy auth data from userspace.");
866*4882a593Smuzhiyun 			ret = -EFAULT;
867*4882a593Smuzhiyun 			goto free_pages;
868*4882a593Smuzhiyun 		}
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun 		sg_init_table(auth1, 2);
871*4882a593Smuzhiyun 		sg_set_buf(auth1, auth_buf, caop->auth_len);
872*4882a593Smuzhiyun 		sg_chain(auth1, 2, src_sg);
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 		if (src_sg == dst_sg) {
875*4882a593Smuzhiyun 			src_sg = auth1;
876*4882a593Smuzhiyun 			dst_sg = auth1;
877*4882a593Smuzhiyun 		} else {
878*4882a593Smuzhiyun 			sg_init_table(auth2, 2);
879*4882a593Smuzhiyun 			sg_set_buf(auth2, auth_buf, caop->auth_len);
880*4882a593Smuzhiyun 			sg_chain(auth2, 2, dst_sg);
881*4882a593Smuzhiyun 			src_sg = auth1;
882*4882a593Smuzhiyun 			dst_sg = auth2;
883*4882a593Smuzhiyun 		}
884*4882a593Smuzhiyun 	}
885*4882a593Smuzhiyun 
886*4882a593Smuzhiyun 	ret = auth_n_crypt(ses_ptr, kcaop, NULL, caop->auth_len,
887*4882a593Smuzhiyun 			src_sg, dst_sg, caop->len);
888*4882a593Smuzhiyun #endif
889*4882a593Smuzhiyun 
890*4882a593Smuzhiyun free_pages:
891*4882a593Smuzhiyun 	cryptodev_release_user_pages(ses_ptr);
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun free_auth_buf:
894*4882a593Smuzhiyun 	free_page((unsigned long)auth_buf);
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	return ret;
897*4882a593Smuzhiyun }
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun /* Chain two sglists together. It will keep the last nent of priv
900*4882a593Smuzhiyun  * and invalidate the first nent of sgl
901*4882a593Smuzhiyun  */
sg_copy_chain(struct scatterlist * prv,unsigned int prv_nents,struct scatterlist * sgl)902*4882a593Smuzhiyun static struct scatterlist *sg_copy_chain(struct scatterlist *prv,
903*4882a593Smuzhiyun 					 unsigned int prv_nents,
904*4882a593Smuzhiyun 					 struct scatterlist *sgl)
905*4882a593Smuzhiyun {
906*4882a593Smuzhiyun 	struct scatterlist *sg_tmp = sg_last(prv, prv_nents);
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun 	sg_set_page(sgl, sg_page(sg_tmp), sg_tmp->length, sg_tmp->offset);
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	if (prv_nents > 1) {
911*4882a593Smuzhiyun 		sg_chain(prv, prv_nents, sgl);
912*4882a593Smuzhiyun 		return prv;
913*4882a593Smuzhiyun 	} else {
914*4882a593Smuzhiyun 		return sgl;
915*4882a593Smuzhiyun 	}
916*4882a593Smuzhiyun }
917*4882a593Smuzhiyun 
crypto_auth_zc_rk(struct csession * ses_ptr,struct kernel_crypt_auth_op * kcaop)918*4882a593Smuzhiyun static int crypto_auth_zc_rk(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
919*4882a593Smuzhiyun {
920*4882a593Smuzhiyun 	struct scatterlist *dst;
921*4882a593Smuzhiyun 	struct scatterlist *src;
922*4882a593Smuzhiyun 	struct scatterlist *dst_sg;
923*4882a593Smuzhiyun 	struct scatterlist *src_sg;
924*4882a593Smuzhiyun 	struct crypt_auth_op *caop = &kcaop->caop;
925*4882a593Smuzhiyun 	unsigned char *auth_buf = NULL, *tag_buf = NULL;
926*4882a593Smuzhiyun 	struct scatterlist auth_src[2], auth_dst[2], tag[3];
927*4882a593Smuzhiyun 	int ret;
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	if (unlikely(ses_ptr->cdata.init == 0 ||
930*4882a593Smuzhiyun 	    (ses_ptr->cdata.stream == 0 && ses_ptr->cdata.aead == 0))) {
931*4882a593Smuzhiyun 		derr(0, "Only stream and AEAD ciphers are allowed for authenc");
932*4882a593Smuzhiyun 		return -EINVAL;
933*4882a593Smuzhiyun 	}
934*4882a593Smuzhiyun 
935*4882a593Smuzhiyun 	if (unlikely(caop->auth_len > PAGE_SIZE)) {
936*4882a593Smuzhiyun 		derr(1, "auth data len is excessive.");
937*4882a593Smuzhiyun 		return -EINVAL;
938*4882a593Smuzhiyun 	}
939*4882a593Smuzhiyun 
940*4882a593Smuzhiyun 	ret = cryptodev_get_userbuf(ses_ptr, caop->src, caop->len,
941*4882a593Smuzhiyun 				    caop->dst, kcaop->dst_len,
942*4882a593Smuzhiyun 				    kcaop->task, kcaop->mm, &src_sg, &dst_sg);
943*4882a593Smuzhiyun 	if (unlikely(ret)) {
944*4882a593Smuzhiyun 		derr(1, "get_userbuf(): Error getting user pages.");
945*4882a593Smuzhiyun 		ret = -EFAULT;
946*4882a593Smuzhiyun 		goto exit;
947*4882a593Smuzhiyun 	}
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun 	dst = dst_sg;
950*4882a593Smuzhiyun 	src = src_sg;
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	/* chain tag */
953*4882a593Smuzhiyun 	if (caop->tag && caop->tag_len > 0) {
954*4882a593Smuzhiyun 		tag_buf = kcalloc(caop->tag_len, sizeof(*tag_buf), GFP_KERNEL);
955*4882a593Smuzhiyun 		if (unlikely(!tag_buf)) {
956*4882a593Smuzhiyun 			derr(1, "unable to kcalloc %d.", caop->tag_len);
957*4882a593Smuzhiyun 			ret = -EFAULT;
958*4882a593Smuzhiyun 			goto free_pages;
959*4882a593Smuzhiyun 		}
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun 		if (unlikely(copy_from_user(tag_buf, caop->tag, caop->tag_len))) {
962*4882a593Smuzhiyun 			derr(1, "unable to copy tag data from userspace.");
963*4882a593Smuzhiyun 			ret = -EFAULT;
964*4882a593Smuzhiyun 			goto free_pages;
965*4882a593Smuzhiyun 		}
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun 		sg_init_table(tag, ARRAY_SIZE(tag));
968*4882a593Smuzhiyun 		sg_set_buf(&tag[1], tag_buf, caop->tag_len);
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun 		/* Since the sg_chain() requires the last sg in the list is empty and
971*4882a593Smuzhiyun 		 * used for link information, we can not directly link src/dst_sg to tags
972*4882a593Smuzhiyun 		 */
973*4882a593Smuzhiyun 		if (caop->op == COP_ENCRYPT)
974*4882a593Smuzhiyun 			dst = sg_copy_chain(dst_sg, sg_nents(dst_sg), tag);
975*4882a593Smuzhiyun 		else
976*4882a593Smuzhiyun 			src = sg_copy_chain(src_sg, sg_nents(src_sg), tag);
977*4882a593Smuzhiyun 	}
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 	/* chain auth */
980*4882a593Smuzhiyun 	auth_buf = (char *)__get_free_page(GFP_KERNEL);
981*4882a593Smuzhiyun 	if (unlikely(!auth_buf)) {
982*4882a593Smuzhiyun 		derr(1, "unable to get a free page.");
983*4882a593Smuzhiyun 		ret = -EFAULT;
984*4882a593Smuzhiyun 		goto free_pages;
985*4882a593Smuzhiyun 	}
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun 	if (caop->auth_src && caop->auth_len > 0) {
988*4882a593Smuzhiyun 		if (unlikely(copy_from_user(auth_buf, caop->auth_src, caop->auth_len))) {
989*4882a593Smuzhiyun 			derr(1, "unable to copy auth data from userspace.");
990*4882a593Smuzhiyun 			ret = -EFAULT;
991*4882a593Smuzhiyun 			goto free_pages;
992*4882a593Smuzhiyun 		}
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 		sg_init_table(auth_src, ARRAY_SIZE(auth_src));
995*4882a593Smuzhiyun 		sg_set_buf(auth_src, auth_buf, caop->auth_len);
996*4882a593Smuzhiyun 		sg_init_table(auth_dst, ARRAY_SIZE(auth_dst));
997*4882a593Smuzhiyun 		sg_set_buf(auth_dst, auth_buf, caop->auth_len);
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 		sg_chain(auth_src, 2, src);
1000*4882a593Smuzhiyun 		sg_chain(auth_dst, 2, dst);
1001*4882a593Smuzhiyun 		src = auth_src;
1002*4882a593Smuzhiyun 		dst = auth_dst;
1003*4882a593Smuzhiyun 	}
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun 	if (caop->op == COP_ENCRYPT)
1006*4882a593Smuzhiyun 		ret = rk_auth_n_crypt(ses_ptr, kcaop, NULL, caop->auth_len,
1007*4882a593Smuzhiyun 				      src, dst, caop->len);
1008*4882a593Smuzhiyun 	else
1009*4882a593Smuzhiyun 		ret = rk_auth_n_crypt(ses_ptr, kcaop, NULL, caop->auth_len,
1010*4882a593Smuzhiyun 				      src, dst, caop->len + caop->tag_len);
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	if (!ret && caop->op == COP_ENCRYPT) {
1013*4882a593Smuzhiyun 		if (unlikely(copy_to_user(kcaop->caop.tag, tag_buf, caop->tag_len))) {
1014*4882a593Smuzhiyun 			derr(1, "Error in copying to userspace");
1015*4882a593Smuzhiyun 			ret = -EFAULT;
1016*4882a593Smuzhiyun 			goto free_pages;
1017*4882a593Smuzhiyun 		}
1018*4882a593Smuzhiyun 	}
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun free_pages:
1021*4882a593Smuzhiyun 	cryptodev_release_user_pages(ses_ptr);
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun exit:
1024*4882a593Smuzhiyun 	if (auth_buf)
1025*4882a593Smuzhiyun 		free_page((unsigned long)auth_buf);
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	kfree(tag_buf);
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun 	return ret;
1030*4882a593Smuzhiyun }
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun static int
__crypto_auth_run_zc(struct csession * ses_ptr,struct kernel_crypt_auth_op * kcaop)1033*4882a593Smuzhiyun __crypto_auth_run_zc(struct csession *ses_ptr, struct kernel_crypt_auth_op *kcaop)
1034*4882a593Smuzhiyun {
1035*4882a593Smuzhiyun 	struct crypt_auth_op *caop = &kcaop->caop;
1036*4882a593Smuzhiyun 	int ret;
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun 	if (caop->flags & COP_FLAG_AEAD_SRTP_TYPE) {
1039*4882a593Smuzhiyun 		ret = crypto_auth_zc_srtp(ses_ptr, kcaop);
1040*4882a593Smuzhiyun 	} else if (caop->flags & COP_FLAG_AEAD_TLS_TYPE &&
1041*4882a593Smuzhiyun 		   ses_ptr->cdata.aead == 0) {
1042*4882a593Smuzhiyun 		ret = crypto_auth_zc_tls(ses_ptr, kcaop);
1043*4882a593Smuzhiyun 	} else if (caop->flags & COP_FLAG_AEAD_RK_TYPE &&
1044*4882a593Smuzhiyun 		   ses_ptr->cdata.aead) {
1045*4882a593Smuzhiyun 		ret = crypto_auth_zc_rk(ses_ptr, kcaop);
1046*4882a593Smuzhiyun 	} else if (ses_ptr->cdata.aead) {
1047*4882a593Smuzhiyun 		ret = crypto_auth_zc_aead(ses_ptr, kcaop);
1048*4882a593Smuzhiyun 	} else {
1049*4882a593Smuzhiyun 		ret = -EINVAL;
1050*4882a593Smuzhiyun 	}
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun 	return ret;
1053*4882a593Smuzhiyun }
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 
crypto_auth_run(struct fcrypt * fcr,struct kernel_crypt_auth_op * kcaop)1056*4882a593Smuzhiyun int crypto_auth_run(struct fcrypt *fcr, struct kernel_crypt_auth_op *kcaop)
1057*4882a593Smuzhiyun {
1058*4882a593Smuzhiyun 	struct csession *ses_ptr;
1059*4882a593Smuzhiyun 	struct crypt_auth_op *caop = &kcaop->caop;
1060*4882a593Smuzhiyun 	int ret;
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun 	if (unlikely(caop->op != COP_ENCRYPT && caop->op != COP_DECRYPT)) {
1063*4882a593Smuzhiyun 		ddebug(1, "invalid operation op=%u", caop->op);
1064*4882a593Smuzhiyun 		return -EINVAL;
1065*4882a593Smuzhiyun 	}
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	/* this also enters ses_ptr->sem */
1068*4882a593Smuzhiyun 	ses_ptr = crypto_get_session_by_sid(fcr, caop->ses);
1069*4882a593Smuzhiyun 	if (unlikely(!ses_ptr)) {
1070*4882a593Smuzhiyun 		derr(1, "invalid session ID=0x%08X", caop->ses);
1071*4882a593Smuzhiyun 		return -EINVAL;
1072*4882a593Smuzhiyun 	}
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	if (unlikely(ses_ptr->cdata.init == 0)) {
1075*4882a593Smuzhiyun 		derr(1, "cipher context not initialized");
1076*4882a593Smuzhiyun 		ret = -EINVAL;
1077*4882a593Smuzhiyun 		goto out_unlock;
1078*4882a593Smuzhiyun 	}
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	/* If we have a hash/mac handle reset its state */
1081*4882a593Smuzhiyun 	if (ses_ptr->hdata.init != 0) {
1082*4882a593Smuzhiyun 		ret = cryptodev_hash_reset(&ses_ptr->hdata);
1083*4882a593Smuzhiyun 		if (unlikely(ret)) {
1084*4882a593Smuzhiyun 			derr(1, "error in cryptodev_hash_reset()");
1085*4882a593Smuzhiyun 			goto out_unlock;
1086*4882a593Smuzhiyun 		}
1087*4882a593Smuzhiyun 	}
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	cryptodev_cipher_set_iv(&ses_ptr->cdata, kcaop->iv,
1090*4882a593Smuzhiyun 				min(ses_ptr->cdata.ivsize, kcaop->ivlen));
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	ret = __crypto_auth_run_zc(ses_ptr, kcaop);
1093*4882a593Smuzhiyun 	if (unlikely(ret)) {
1094*4882a593Smuzhiyun 		derr(1, "error in __crypto_auth_run_zc()");
1095*4882a593Smuzhiyun 		goto out_unlock;
1096*4882a593Smuzhiyun 	}
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	ret = 0;
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	cryptodev_cipher_get_iv(&ses_ptr->cdata, kcaop->iv,
1101*4882a593Smuzhiyun 				min(ses_ptr->cdata.ivsize, kcaop->ivlen));
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun out_unlock:
1104*4882a593Smuzhiyun 	crypto_put_session(ses_ptr);
1105*4882a593Smuzhiyun 	return ret;
1106*4882a593Smuzhiyun }
1107