/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2010,2011 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 * Portions Copyright (c) 2010 Michael Weiser
 * Portions Copyright (c) 2010 Phil Sutter
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/aead.h>
#include <linux/rtnetlink.h>
#include <crypto/authenc.h>
#include "cryptodev.h"
#include "cipherapi.h"

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
extern const struct crypto_type crypto_givcipher_type;
#endif

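/*
 * Completion callback for asynchronous crypto requests. An err of
 * -EINPROGRESS only notifies us that a backlogged request has been
 * moved onto the device queue, so the operation is still pending and
 * the waiter must not be woken yet.
 */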
static void cryptodev_complete(struct crypto_async_request *req, int err)
{
	struct cryptodev_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

int cryptodev_get_cipher_keylen(unsigned int *keylen, struct session_op *sop,
		int aead)
{
	/*
	 * For block ciphers (e.g. AES-CBC) or non-composite AEAD ciphers (like
	 * AES-GCM), the key length is simply the cipher keylen obtained from
	 * userspace. If the cipher is a composite AEAD, the keylen is the sum of
	 * the cipher keylen, the hmac keylen and a key header length. This is
	 * the key format used by the Linux kernel for composite AEAD ciphers
	 * (crypto/authenc.c).
	 */
	unsigned int klen = sop->keylen;

	if (unlikely(sop->keylen > CRYPTO_CIPHER_MAX_KEY_LEN))
		return -EINVAL;

	if (aead && sop->mackeylen) {
		if (unlikely(sop->mackeylen > CRYPTO_HMAC_MAX_KEY_LEN))
			return -EINVAL;
		klen += sop->mackeylen;
		klen += RTA_SPACE(sizeof(struct crypto_authenc_key_param));
	}

	*keylen = klen;
	return 0;
}

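/*
 * Illustrative arithmetic (not part of the original source): for an
 * authenc session using AES-128-CBC with HMAC-SHA1 (keylen = 16,
 * mackeylen = 20), the composite keylen computed above is
 *
 *   16 + 20 + RTA_SPACE(sizeof(struct crypto_authenc_key_param))
 *   = 16 + 20 + 8 = 44 bytes
 *
 * on typical configurations, since the rtattr header and the __be32
 * enckeylen field take four bytes each.
 */
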
int cryptodev_get_cipher_key(uint8_t *key, struct session_op *sop, int aead)
{
	/*
	 * Get the cipher key from user-space. For block ciphers just copy it
	 * from user-space. For composite AEAD ciphers combine it with the hmac
	 * key in the format used by the Linux kernel in crypto/authenc.c:
	 *
	 * [[AUTHENC_KEY_HEADER + CIPHER_KEYLEN] [AUTHENTICATION KEY] [CIPHER KEY]]
	 */
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	int ret = 0;

	if (aead && sop->mackeylen) {
		/*
		 * Composite AEAD ciphers. The first four bytes are the header
		 * type and header length for AEAD keys.
		 */
		rta = (void *)key;
		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
		rta->rta_len = RTA_LENGTH(sizeof(*param));

		/*
		 * The next four bytes hold the length of the encryption key.
		 */
		param = RTA_DATA(rta);
		param->enckeylen = cpu_to_be32(sop->keylen);

		/* Advance the key pointer eight bytes and copy the hmac key */
		key += RTA_SPACE(sizeof(*param));
		if (unlikely(copy_from_user(key, sop->mackey, sop->mackeylen))) {
			ret = -EFAULT;
			goto error;
		}
		/* Advance the key pointer past the hmac key */
		key += sop->mackeylen;
	}
	/* Now copy the block cipher key */
	if (unlikely(copy_from_user(key, sop->key, sop->keylen)))
		ret = -EFAULT;

error:
	return ret;
}

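/*
 * For the same illustrative AES-128-CBC + HMAC-SHA1 session, the buffer
 * assembled above is laid out as follows:
 *
 *   offset  0: struct rtattr  (rta_len = 8, rta_type = CRYPTO_AUTHENC_KEYA_PARAM)
 *   offset  4: __be32 enckeylen = 16
 *   offset  8: 20-byte hmac key
 *   offset 28: 16-byte cipher key
 */
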
/* Was the correct key length supplied? */
static int check_key_size(size_t keylen, const char *alg_name,
			  unsigned int min_keysize, unsigned int max_keysize)
{
	if (max_keysize > 0 && unlikely((keylen < min_keysize) ||
					(keylen > max_keysize))) {
		ddebug(1, "Wrong keylen '%zu' for algorithm '%s'. Use %u to %u.",
		       keylen, alg_name, min_keysize, max_keysize);
		return -EINVAL;
	}

	return 0;
}

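/*
 * Allocate a cipher transform plus its request, validate the supplied
 * key size and program the key. alg_name is a kernel algorithm name
 * (e.g. "cbc(aes)"); stream flags stream ciphers and aead selects the
 * AEAD path.
 */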
int cryptodev_cipher_init(struct cipher_data *out, const char *alg_name,
				uint8_t *keyp, size_t keylen, int stream, int aead)
{
	int ret;

	if (aead == 0) {
		unsigned int min_keysize, max_keysize;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
		struct crypto_tfm *tfm;
#else
		struct ablkcipher_alg *alg;
#endif

		out->async.s = cryptodev_crypto_alloc_blkcipher(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.s))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return PTR_ERR(out->async.s);
		}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
		tfm = crypto_skcipher_tfm(out->async.s);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 4, 0))
		if ((tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
		    || (tfm->__crt_alg->cra_type == &crypto_givcipher_type)
#endif
							) {
			struct ablkcipher_alg *alg;

			alg = &tfm->__crt_alg->cra_ablkcipher;
			min_keysize = alg->min_keysize;
			max_keysize = alg->max_keysize;
		} else
#endif
		{
			struct skcipher_alg *alg;

			alg = crypto_skcipher_alg(out->async.s);
			min_keysize = alg->min_keysize;
			max_keysize = alg->max_keysize;
		}
#else
		alg = crypto_ablkcipher_alg(out->async.s);
		min_keysize = alg->min_keysize;
		max_keysize = alg->max_keysize;
#endif
		ret = check_key_size(keylen, alg_name, min_keysize,
				     max_keysize);
		if (ret)
			goto error;

		out->blocksize = cryptodev_crypto_blkcipher_blocksize(out->async.s);
		out->ivsize = cryptodev_crypto_blkcipher_ivsize(out->async.s);
		out->alignmask = cryptodev_crypto_blkcipher_alignmask(out->async.s);

		ret = cryptodev_crypto_blkcipher_setkey(out->async.s, keyp, keylen);
	} else {
		out->async.as = crypto_alloc_aead(alg_name, 0, 0);
		if (unlikely(IS_ERR(out->async.as))) {
			ddebug(1, "Failed to load cipher %s", alg_name);
			return PTR_ERR(out->async.as);
		}

		out->blocksize = crypto_aead_blocksize(out->async.as);
		out->ivsize = crypto_aead_ivsize(out->async.as);
		out->alignmask = crypto_aead_alignmask(out->async.as);

		ret = crypto_aead_setkey(out->async.as, keyp, keylen);
	}

	if (unlikely(ret)) {
		ddebug(1, "Setting key failed for %s-%zu.", alg_name, keylen*8);
		ret = -EINVAL;
		goto error;
	}

	out->stream = stream;
	out->aead = aead;

	init_completion(&out->async.result.completion);

	if (aead == 0) {
		out->async.request = cryptodev_blkcipher_request_alloc(out->async.s, GFP_KERNEL);
		if (unlikely(!out->async.request)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		cryptodev_blkcipher_request_set_callback(out->async.request,
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					cryptodev_complete, &out->async.result);
	} else {
		out->async.arequest = aead_request_alloc(out->async.as, GFP_KERNEL);
		if (unlikely(!out->async.arequest)) {
			derr(1, "error allocating async crypto request");
			ret = -ENOMEM;
			goto error;
		}

		aead_request_set_callback(out->async.arequest,
					CRYPTO_TFM_REQ_MAY_BACKLOG,
					cryptodev_complete, &out->async.result);
	}

	out->init = 1;
	return 0;
error:
	if (aead == 0) {
		cryptodev_blkcipher_request_free(out->async.request);
		cryptodev_crypto_free_blkcipher(out->async.s);
	} else {
		if (out->async.arequest)
			aead_request_free(out->async.arequest);
		if (out->async.as)
			crypto_free_aead(out->async.as);
	}

	return ret;
}

void cryptodev_cipher_deinit(struct cipher_data *cdata)
{
	if (cdata->init) {
		if (cdata->aead == 0) {
			cryptodev_blkcipher_request_free(cdata->async.request);
			cryptodev_crypto_free_blkcipher(cdata->async.s);
		} else {
			if (cdata->async.arequest)
				aead_request_free(cdata->async.arequest);
			if (cdata->async.as)
				crypto_free_aead(cdata->async.as);
		}

		cdata->init = 0;
	}
}

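/*
 * Reconcile the immediate return value of a crypto operation with its
 * asynchronous completion: 0 means it already finished, -EINPROGRESS or
 * -EBUSY mean the request was queued (possibly via the backlog) and we
 * must sleep until cryptodev_complete() fires; anything else is an
 * error which is passed straight through.
 */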
static inline int waitfor(struct cryptodev_result *cr, ssize_t ret)
{
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&cr->completion);
		/* At this point we know for sure that the request has
		 * finished, because wait_for_completion above was not
		 * interruptible. This is important because otherwise the
		 * hardware or driver might try to access memory which will
		 * be freed or reused for another request. */

		if (unlikely(cr->err)) {
			derr(0, "error from async request: %d", cr->err);
			return cr->err;
		}

		break;
	default:
		return ret;
	}

	return 0;
}

ssize_t cryptodev_cipher_encrypt(struct cipher_data *cdata,
		const struct scatterlist *src, struct scatterlist *dst,
		size_t len)
{
	int ret;

	reinit_completion(&cdata->async.result.completion);

	if (cdata->aead == 0) {
		cryptodev_blkcipher_request_set_crypt(cdata->async.request,
			(struct scatterlist *)src, dst,
			len, cdata->async.iv);
		ret = cryptodev_crypto_blkcipher_encrypt(cdata->async.request);
	} else {
		aead_request_set_crypt(cdata->async.arequest,
			(struct scatterlist *)src, dst,
			len, cdata->async.iv);
		ret = crypto_aead_encrypt(cdata->async.arequest);
	}

	return waitfor(&cdata->async.result, ret);
}

ssize_t cryptodev_cipher_decrypt(struct cipher_data *cdata,
		const struct scatterlist *src, struct scatterlist *dst,
		size_t len)
{
	int ret;

	reinit_completion(&cdata->async.result.completion);
	if (cdata->aead == 0) {
		cryptodev_blkcipher_request_set_crypt(cdata->async.request,
			(struct scatterlist *)src, dst,
			len, cdata->async.iv);
		ret = cryptodev_crypto_blkcipher_decrypt(cdata->async.request);
	} else {
		aead_request_set_crypt(cdata->async.arequest,
			(struct scatterlist *)src, dst,
			len, cdata->async.iv);
		ret = crypto_aead_decrypt(cdata->async.arequest);
	}

	return waitfor(&cdata->async.result, ret);
}

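/*
 * Minimal usage sketch (illustrative only, assuming the caller has
 * filled in the IV at cdata->async.iv beforehand, e.g. through an IV
 * helper such as cryptodev_cipher_set_iv()):
 *
 *	struct cipher_data cdata;
 *	int ret;
 *
 *	ret = cryptodev_cipher_init(&cdata, "cbc(aes)", key, 16, 0, 0);
 *	if (ret == 0) {
 *		ret = cryptodev_cipher_encrypt(&cdata, sg, sg, len);
 *		cryptodev_cipher_deinit(&cdata);
 *	}
 */
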
/* Hash functions */

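/*
 * Allocate an ahash transform for alg_name (e.g. "sha1" or
 * "hmac(sha1)"). When hmac_mode is non-zero, the mac key is programmed
 * into the transform before any data is hashed.
 */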
int cryptodev_hash_init(struct hash_data *hdata, const char *alg_name,
			int hmac_mode, void *mackey, size_t mackeylen)
{
	int ret;

	hdata->async.s = crypto_alloc_ahash(alg_name, 0, 0);
	if (unlikely(IS_ERR(hdata->async.s))) {
		ddebug(1, "Failed to load transform for %s", alg_name);
		return PTR_ERR(hdata->async.s);
	}

	/* Copy the key from user and set to TFM. */
	if (hmac_mode != 0) {
		ret = crypto_ahash_setkey(hdata->async.s, mackey, mackeylen);
		if (unlikely(ret)) {
			ddebug(1, "Setting hmac key failed for %s-%zu.",
					alg_name, mackeylen*8);
			ret = -EINVAL;
			goto error;
		}
	}

	hdata->digestsize = crypto_ahash_digestsize(hdata->async.s);
	hdata->alignmask = crypto_ahash_alignmask(hdata->async.s);

	init_completion(&hdata->async.result.completion);

	hdata->async.request = ahash_request_alloc(hdata->async.s, GFP_KERNEL);
	if (unlikely(!hdata->async.request)) {
		derr(0, "error allocating async crypto request");
		ret = -ENOMEM;
		goto error;
	}

	ahash_request_set_callback(hdata->async.request,
			CRYPTO_TFM_REQ_MAY_BACKLOG,
			cryptodev_complete, &hdata->async.result);
	hdata->init = 1;
	return 0;

error:
	crypto_free_ahash(hdata->async.s);
	return ret;
}

void cryptodev_hash_deinit(struct hash_data *hdata)
{
	if (hdata->init) {
		ahash_request_free(hdata->async.request);
		crypto_free_ahash(hdata->async.s);
		hdata->init = 0;
	}
}

int cryptodev_hash_reset(struct hash_data *hdata)
{
	int ret;

	ret = crypto_ahash_init(hdata->async.request);
	if (unlikely(ret)) {
		derr(0, "error in crypto_ahash_init()");
		return ret;
	}

	return 0;
}

ssize_t cryptodev_hash_update(struct hash_data *hdata,
				struct scatterlist *sg, size_t len)
{
	int ret;

	reinit_completion(&hdata->async.result.completion);
	ahash_request_set_crypt(hdata->async.request, sg, NULL, len);

	ret = crypto_ahash_update(hdata->async.request);

	return waitfor(&hdata->async.result, ret);
}

int cryptodev_hash_final(struct hash_data *hdata, void *output)
{
	int ret;

	reinit_completion(&hdata->async.result.completion);
	ahash_request_set_crypt(hdata->async.request, NULL, output, 0);

	ret = crypto_ahash_final(hdata->async.request);

	return waitfor(&hdata->async.result, ret);
}

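/*
 * Minimal digest sketch (illustrative only): one reset, any number of
 * updates, one final:
 *
 *	struct hash_data hdata;
 *
 *	if (cryptodev_hash_init(&hdata, "sha1", 0, NULL, 0) == 0) {
 *		cryptodev_hash_reset(&hdata);
 *		cryptodev_hash_update(&hdata, sg, len);
 *		cryptodev_hash_final(&hdata, digest);
 *		cryptodev_hash_deinit(&hdata);
 *	}
 */
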
#ifdef CIOCCPHASH
/* Copy the current hash state from src to dst by exporting the state
 * of src and importing it into dst */
int cryptodev_hash_copy(struct hash_data *dst, struct hash_data *src)
{
	int ret, statesize;
	void *statedata = NULL;
	struct crypto_tfm *tfm;

	if (unlikely(src == NULL || !src->init ||
		     dst == NULL || !dst->init)) {
		return -EINVAL;
	}

	reinit_completion(&src->async.result.completion);

	statesize = crypto_ahash_statesize(src->async.s);
	if (unlikely(statesize <= 0)) {
		return -EINVAL;
	}

	statedata = kzalloc(statesize, GFP_KERNEL);
	if (unlikely(statedata == NULL)) {
		return -ENOMEM;
	}

	ret = crypto_ahash_export(src->async.request, statedata);
	if (unlikely(ret < 0)) {
		if (unlikely(ret == -ENOSYS)) {
			tfm = crypto_ahash_tfm(src->async.s);
			derr(0, "cryptodev_hash_copy: crypto_ahash_export not implemented for "
				"alg='%s', driver='%s'", crypto_tfm_alg_name(tfm),
				crypto_tfm_alg_driver_name(tfm));
		}
		goto out;
	}

	ret = crypto_ahash_import(dst->async.request, statedata);
	if (unlikely(ret == -ENOSYS)) {
		tfm = crypto_ahash_tfm(dst->async.s);
		derr(0, "cryptodev_hash_copy: crypto_ahash_import not implemented for "
			"alg='%s', driver='%s'", crypto_tfm_alg_name(tfm),
			crypto_tfm_alg_driver_name(tfm));
	}
out:
	kfree(statedata);
	return ret;
}
#endif /* CIOCCPHASH */