// SPDX-License-Identifier: GPL-2.0
/*
 * Rockchip crypto skcipher utils
 *
 * Copyright (c) 2022, Rockchip Electronics Co., Ltd
 *
 * Author: Lin Jinhan <troy.lin@rock-chips.com>
 *
 */

#include "rk_crypto_skcipher_utils.h"

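/* Small helpers to map generic crypto API handles back to driver-private state */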
struct rk_crypto_algt *rk_cipher_get_algt(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	return container_of(alg, struct rk_crypto_algt, alg.crypto);
}

struct rk_crypto_algt *rk_aead_get_algt(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);

	return container_of(alg, struct rk_crypto_algt, alg.aead);
}

struct rk_cipher_ctx *rk_cipher_ctx_cast(struct rk_crypto_dev *rk_dev)
{
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(rk_dev->async_req->tfm);

	return ctx;
}

struct rk_alg_ctx *rk_cipher_alg_ctx(struct rk_crypto_dev *rk_dev)
{
	return &(rk_cipher_ctx_cast(rk_dev)->algs_ctx);
}

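/*
 * Modes whose input length need not be a multiple of the cipher block
 * size (stream-like modes plus XTS/GCM).
 */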
static bool is_no_multi_blocksize(uint32_t mode)
{
	return (mode == CIPHER_MODE_CFB ||
		mode == CIPHER_MODE_OFB ||
		mode == CIPHER_MODE_CTR ||
		mode == CIPHER_MODE_XTS ||
		mode == CIPHER_MODE_GCM);
}

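/*
 * Hand the request over to the software fallback skcipher.  The key is
 * programmed lazily: it is cached in the context by setkey() and only
 * pushed to the fallback tfm the first time the fallback path is taken.
 */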
int rk_cipher_fallback(struct skcipher_request *req, struct rk_cipher_ctx *ctx, bool encrypt)
{
	int ret;

	CRYPTO_MSG("use fallback tfm");

	if (!ctx->fallback_tfm) {
		ret = -ENODEV;
		CRYPTO_MSG("fallback_tfm is empty!\n");
		goto exit;
	}

	if (!ctx->fallback_key_inited) {
		ret = crypto_skcipher_setkey(ctx->fallback_tfm,
					     ctx->key, ctx->keylen);
		if (ret) {
			CRYPTO_MSG("fallback crypto_skcipher_setkey err = %d\n",
				   ret);
			goto exit;
		}

		ctx->fallback_key_inited = true;
	}

	skcipher_request_set_tfm(&ctx->fallback_req, ctx->fallback_tfm);
	skcipher_request_set_callback(&ctx->fallback_req,
				      req->base.flags,
				      req->base.complete,
				      req->base.data);

	skcipher_request_set_crypt(&ctx->fallback_req, req->src,
				   req->dst, req->cryptlen, req->iv);

	ret = encrypt ? crypto_skcipher_encrypt(&ctx->fallback_req) :
			crypto_skcipher_decrypt(&ctx->fallback_req);

exit:
	return ret;
}

/* increment counter (128-bit int) by 1 */
static void rk_ctr128_inc(uint8_t *counter)
{
	u32 n = 16;
	u8  c;

	do {
		--n;
		c = counter[n];
		++c;
		counter[n] = c;
		if (c)
			return;
	} while (n);
}

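/* Advance the 128-bit counter by one for every AES block covered by data_len */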
static void rk_ctr128_calc(uint8_t *counter, uint32_t data_len)
{
	u32 i;
	u32 chunksize = AES_BLOCK_SIZE;

	for (i = 0; i < DIV_ROUND_UP(data_len, chunksize); i++)
		rk_ctr128_inc(counter);
}

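/*
 * Derive the IV for the next chunk from the data just processed:
 * CTR advances the counter, CBC/CFB use the last ciphertext block
 * (taken from the destination on encrypt, or from the saved lastc on
 * decrypt), and OFB recovers the last keystream block by XOR-ing the
 * last output block with the last input block.  Returns the IV size,
 * or 0 if the mode keeps its IV unchanged.
 */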
static uint32_t rk_get_new_iv(struct rk_cipher_ctx *ctx, u32 mode, bool is_enc, uint8_t *iv)
{
	struct scatterlist *sg_dst;
	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;
	uint32_t ivsize = alg_ctx->chunk_size;

	if (!iv)
		return 0;

	sg_dst = alg_ctx->aligned ? alg_ctx->sg_dst : &alg_ctx->sg_tmp;

	CRYPTO_TRACE("aligned = %u, count = %u, ivsize = %u, is_enc = %d\n",
		     alg_ctx->aligned, alg_ctx->count, ivsize, is_enc);

	switch (mode) {
	case CIPHER_MODE_CTR:
		rk_ctr128_calc(iv, alg_ctx->count);
		break;
	case CIPHER_MODE_CBC:
	case CIPHER_MODE_CFB:
		if (is_enc)
			sg_pcopy_to_buffer(sg_dst, alg_ctx->map_nents,
					   iv, ivsize, alg_ctx->count - ivsize);
		else
			memcpy(iv, ctx->lastc, ivsize);
		break;
	case CIPHER_MODE_OFB:
		sg_pcopy_to_buffer(sg_dst, alg_ctx->map_nents,
				   iv, ivsize, alg_ctx->count - ivsize);
		crypto_xor(iv, ctx->lastc, ivsize);
		break;
	default:
		return 0;
	}

	return ivsize;
}

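/* Copy the updated IV back into req->iv once the request has finished */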
static void rk_iv_copyback(struct rk_crypto_dev *rk_dev)
{
	uint32_t iv_size;
	struct skcipher_request *req = skcipher_request_cast(rk_dev->async_req);
	struct rk_cipher_ctx *ctx = rk_cipher_ctx_cast(rk_dev);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct rk_crypto_algt *algt = rk_cipher_get_algt(cipher);

	iv_size = rk_get_new_iv(ctx, algt->mode, ctx->is_enc, ctx->iv);

	if (iv_size && req->iv)
		memcpy(req->iv, ctx->iv, iv_size);
}

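/* Program the chained IV into the hardware before starting the next chunk */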
static void rk_update_iv(struct rk_crypto_dev *rk_dev)
{
	uint32_t iv_size;
	struct rk_cipher_ctx *ctx = rk_cipher_ctx_cast(rk_dev);
	struct rk_alg_ctx *algs_ctx = &ctx->algs_ctx;
	struct skcipher_request *req = skcipher_request_cast(rk_dev->async_req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct rk_crypto_algt *algt = rk_cipher_get_algt(cipher);

	iv_size = rk_get_new_iv(ctx, algt->mode, ctx->is_enc, ctx->iv);

	if (iv_size)
		algs_ctx->ops.hw_write_iv(rk_dev, ctx->iv, iv_size);
}

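/*
 * Map the next chunk of data, save its last source block in ctx->lastc
 * (used later by rk_get_new_iv() for CBC/CFB decrypt and OFB chaining),
 * then kick off the DMA transfer.
 */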
static int rk_set_data_start(struct rk_crypto_dev *rk_dev)
{
	int err;
	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);

	err = rk_dev->load_data(rk_dev, alg_ctx->sg_src, alg_ctx->sg_dst);
	if (!err) {
		u32 ivsize = alg_ctx->chunk_size;
		struct scatterlist *src_sg;
		struct rk_cipher_ctx *ctx = rk_cipher_ctx_cast(rk_dev);

		memset(ctx->lastc, 0x00, sizeof(ctx->lastc));

		src_sg = alg_ctx->aligned ? alg_ctx->sg_src : &alg_ctx->sg_tmp;

		ivsize = alg_ctx->count > ivsize ? ivsize : alg_ctx->count;

		sg_pcopy_to_buffer(src_sg, alg_ctx->map_nents,
				   ctx->lastc, ivsize, alg_ctx->count - ivsize);

		alg_ctx->ops.hw_dma_start(rk_dev, true);
	}

	return err;
}

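/*
 * Validate the key length for the selected algorithm (XTS takes a
 * double-length key), then cache the key so it can also be fed to the
 * fallback tfm on demand.
 */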
int rk_cipher_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen)
{
	struct rk_crypto_algt *algt = rk_cipher_get_algt(cipher);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	uint32_t key_factor;
	int ret = -EINVAL;

	CRYPTO_MSG("algo = %x, mode = %x, key_len = %d\n",
		   algt->algo, algt->mode, keylen);

	/* The key length of XTS is twice the normal length */
	key_factor = algt->mode == CIPHER_MODE_XTS ? 2 : 1;

	switch (algt->algo) {
	case CIPHER_ALGO_DES:
		ret = verify_skcipher_des_key(cipher, key);
		if (ret)
			goto exit;
		break;
	case CIPHER_ALGO_DES3_EDE:
		ret = verify_skcipher_des3_key(cipher, key);
		if (ret)
			goto exit;
		break;
	case CIPHER_ALGO_AES:
		if (keylen != (AES_KEYSIZE_128 * key_factor) &&
		    keylen != (AES_KEYSIZE_192 * key_factor) &&
		    keylen != (AES_KEYSIZE_256 * key_factor))
			goto exit;
		break;
	case CIPHER_ALGO_SM4:
		if (keylen != (SM4_KEY_SIZE * key_factor))
			goto exit;
		break;
	default:
		ret = -EINVAL;
		goto exit;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	ctx->fallback_key_inited = false;

	ret = 0;
exit:
	return ret;
}

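/*
 * Completion path for one processed chunk.  While data remains, the IV is
 * chained and the next chunk is started.  On the final chunk of an AEAD
 * request the hardware tag is either appended to the destination (encrypt)
 * or compared against the tag at the end of the source (decrypt).
 */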
int rk_ablk_rx(struct rk_crypto_dev *rk_dev)
{
	int err = 0;
	struct rk_cipher_ctx *ctx = rk_cipher_ctx_cast(rk_dev);
	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);

	CRYPTO_TRACE("left_bytes = %u\n", alg_ctx->left_bytes);

	err = rk_dev->unload_data(rk_dev);
	if (err)
		goto out_rx;

	if (alg_ctx->left_bytes) {
		rk_update_iv(rk_dev);
		if (alg_ctx->aligned) {
			if (sg_is_last(alg_ctx->sg_src)) {
				dev_err(rk_dev->dev, "[%s:%d] Lack of data\n",
					__func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			alg_ctx->sg_src = sg_next(alg_ctx->sg_src);
			alg_ctx->sg_dst = sg_next(alg_ctx->sg_dst);
		}
		err = rk_set_data_start(rk_dev);
	} else {
		if (alg_ctx->is_aead) {
			u8 hard_tag[RK_MAX_TAG_SIZE];
			u8 user_tag[RK_MAX_TAG_SIZE];
			struct aead_request *req =
				aead_request_cast(rk_dev->async_req);
			struct crypto_aead *tfm = crypto_aead_reqtfm(req);

			unsigned int authsize = crypto_aead_authsize(tfm);

			CRYPTO_TRACE("cryptlen = %u, assoclen = %u, aead authsize = %u",
				     alg_ctx->total, alg_ctx->assoclen, authsize);

			err = alg_ctx->ops.hw_get_result(rk_dev, hard_tag, authsize);
			if (err)
				goto out_rx;

			CRYPTO_DUMPHEX("hard_tag", hard_tag, authsize);
			if (!ctx->is_enc) {
				if (!sg_pcopy_to_buffer(alg_ctx->req_src,
							sg_nents(alg_ctx->req_src),
							user_tag, authsize,
							alg_ctx->total +
							alg_ctx->assoclen)) {
					err = -EINVAL;
					goto out_rx;
				}

				CRYPTO_DUMPHEX("user_tag", user_tag, authsize);
				err = crypto_memneq(user_tag, hard_tag, authsize) ? -EBADMSG : 0;
			} else {
				if (!sg_pcopy_from_buffer(alg_ctx->req_dst,
							  sg_nents(alg_ctx->req_dst),
							  hard_tag, authsize,
							  alg_ctx->total +
							  alg_ctx->assoclen)) {
					err = -EINVAL;
					goto out_rx;
				}
			}
		} else {
			rk_iv_copyback(rk_dev);
		}
	}
out_rx:
	return err;
}

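/*
 * Initialise the per-request scatterlist bookkeeping, program the hardware
 * for the selected algorithm/mode and start the first chunk.
 */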
int rk_ablk_start(struct rk_crypto_dev *rk_dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(rk_dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_crypto_algt *algt = rk_cipher_get_algt(tfm);
	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);
	int err = 0;

	alg_ctx->left_bytes = req->cryptlen;
	alg_ctx->total      = req->cryptlen;
	alg_ctx->sg_src     = req->src;
	alg_ctx->req_src    = req->src;
	alg_ctx->src_nents  = sg_nents_for_len(req->src, req->cryptlen);
	alg_ctx->sg_dst     = req->dst;
	alg_ctx->req_dst    = req->dst;
	alg_ctx->dst_nents  = sg_nents_for_len(req->dst, req->cryptlen);

	CRYPTO_TRACE("total = %u", alg_ctx->total);

	alg_ctx->ops.hw_init(rk_dev, algt->algo, algt->mode);
	err = rk_set_data_start(rk_dev);

	return err;
}

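/*
 * Queue an skcipher request.  Block-oriented modes must be given a whole
 * number of chunk_size blocks; stream-like modes are exempt.
 */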
int rk_skcipher_handle_req(struct rk_crypto_dev *rk_dev, struct skcipher_request *req)
{
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct rk_crypto_algt *algt = rk_cipher_get_algt(cipher);

	if (!IS_ALIGNED(req->cryptlen, ctx->algs_ctx.chunk_size) &&
	    !is_no_multi_blocksize(algt->mode))
		return -EINVAL;

	return rk_dev->enqueue(rk_dev, &req->base);
}

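/*
 * Hand an AEAD request over to the software fallback implementation,
 * programming the cached key into the fallback tfm on first use.
 */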
int rk_aead_fallback(struct aead_request *req, struct rk_cipher_ctx *ctx, bool encrypt)
{
	int ret;
	struct aead_request *subreq = aead_request_ctx(req);

	if (!ctx->fallback_aead) {
		CRYPTO_TRACE("fallback_tfm is empty");
		return -EINVAL;
	}

	CRYPTO_MSG("use fallback tfm");

	if (!ctx->fallback_key_inited) {
		ret = crypto_aead_setkey(ctx->fallback_aead, ctx->key, ctx->keylen);
		if (ret) {
			CRYPTO_MSG("fallback crypto_aead_setkey err = %d\n", ret);
			goto exit;
		}

		ctx->fallback_key_inited = true;
	}

	aead_request_set_tfm(subreq, ctx->fallback_aead);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	ret = encrypt ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);

exit:
	return ret;
}

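/* Validate and cache the AEAD key (AES or SM4 only) */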
int rk_aead_setkey(struct crypto_aead *cipher, const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
	struct rk_crypto_algt *algt = rk_aead_get_algt(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret = -EINVAL;

	CRYPTO_MSG("algo = %x, mode = %x, key_len = %d\n", algt->algo, algt->mode, keylen);

	switch (algt->algo) {
	case CIPHER_ALGO_AES:
		if (keylen != AES_KEYSIZE_128 &&
		    keylen != AES_KEYSIZE_192 &&
		    keylen != AES_KEYSIZE_256)
			goto error;

		break;
	case CIPHER_ALGO_SM4:
		if (keylen != SM4_KEY_SIZE)
			goto error;

		break;
	default:
		CRYPTO_TRACE();
		goto error;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	ctx->fallback_key_inited = false;

	return 0;

error:
	return ret;
}

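/*
 * Set up an AEAD transfer.  For decryption the authentication tag at the
 * tail of the source is not part of the payload, so authsize is subtracted
 * before programming the hardware.
 */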
int rk_aead_start(struct rk_crypto_dev *rk_dev)
{
	struct aead_request *req = aead_request_cast(rk_dev->async_req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_aead_ctx(tfm);
	struct rk_crypto_algt *algt = rk_aead_get_algt(tfm);
	struct rk_alg_ctx *alg_ctx = rk_cipher_alg_ctx(rk_dev);
	unsigned int total = 0, authsize;
	int err = 0;

	total = req->cryptlen + req->assoclen;

	authsize = ctx->is_enc ? 0 : crypto_aead_authsize(tfm);

	alg_ctx->total      = req->cryptlen - authsize;
	alg_ctx->assoclen   = req->assoclen;
	alg_ctx->sg_src     = req->src;
	alg_ctx->req_src    = req->src;
	alg_ctx->src_nents  = sg_nents_for_len(req->src, total);
	alg_ctx->sg_dst     = req->dst;
	alg_ctx->req_dst    = req->dst;
	alg_ctx->dst_nents  = sg_nents_for_len(req->dst, total - authsize);
	alg_ctx->left_bytes = alg_ctx->total;

	CRYPTO_TRACE("src_nents = %zu, dst_nents = %zu", alg_ctx->src_nents, alg_ctx->dst_nents);
	CRYPTO_TRACE("is_enc = %d, authsize = %u, cryptlen = %u, total = %u, assoclen = %u",
		     ctx->is_enc, authsize, req->cryptlen, alg_ctx->total, alg_ctx->assoclen);

	alg_ctx->ops.hw_init(rk_dev, algt->algo, algt->mode);
	err = rk_set_data_start(rk_dev);

	return err;
}

int rk_aead_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	return crypto_gcm_check_authsize(authsize);
}

int rk_aead_handle_req(struct rk_crypto_dev *rk_dev, struct aead_request *req)
{
	return rk_dev->enqueue(rk_dev, &req->base);
}