xref: /OK3568_Linux_fs/kernel/drivers/crypto/rockchip/rk_crypto_ahash_utils.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Rockchip crypto hash utils
 *
 * Copyright (c) 2022, Rockchip Electronics Co., Ltd
 *
 * Author: Lin Jinhan <troy.lin@rock-chips.com>
 *
 */

#include "rk_crypto_core.h"
#include "rk_crypto_ahash_utils.h"

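/*
 * Map the driver's HASH_ALGO_* identifiers to the canonical crypto API
 * algorithm names used when allocating the software fallback tfm.
 */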
static const char * const hash_algo2name[] = {
	[HASH_ALGO_MD5]    = "md5",
	[HASH_ALGO_SHA1]   = "sha1",
	[HASH_ALGO_SHA224] = "sha224",
	[HASH_ALGO_SHA256] = "sha256",
	[HASH_ALGO_SHA384] = "sha384",
	[HASH_ALGO_SHA512] = "sha512",
	[HASH_ALGO_SM3]    = "sm3",
};

static void rk_alg_ctx_clear(struct rk_alg_ctx *alg_ctx)
{
	alg_ctx->total      = 0;
	alg_ctx->left_bytes = 0;
	alg_ctx->count      = 0;
	alg_ctx->sg_src     = NULL;
	alg_ctx->req_src    = NULL;
	alg_ctx->src_nents  = 0;
}

static void rk_ahash_ctx_clear(struct rk_ahash_ctx *ctx)
{
	rk_alg_ctx_clear(&ctx->algs_ctx);

	memset(ctx->hash_tmp, 0x00, RK_DMA_ALIGNMENT);
	memset(ctx->lastc, 0x00, sizeof(ctx->lastc));

	ctx->hash_tmp_len = 0;
	ctx->calc_cnt     = 0;
	ctx->lastc_len    = 0;
}

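/*
 * Context accessors: recover the per-tfm hash context of the request
 * currently queued on the device (rk_dev->async_req).
 */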
struct rk_ahash_ctx *rk_ahash_ctx_cast(struct rk_crypto_dev *rk_dev)
{
	struct ahash_request *req = ahash_request_cast(rk_dev->async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	return crypto_ahash_ctx(tfm);
}

struct rk_alg_ctx *rk_ahash_alg_ctx(struct rk_crypto_dev *rk_dev)
{
	return &(rk_ahash_ctx_cast(rk_dev))->algs_ctx;
}

struct rk_crypto_algt *rk_ahash_get_algt(struct crypto_ahash *tfm)
{
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);

	return container_of(alg, struct rk_crypto_algt, alg.hash);
}

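/*
 * Load the current source scatterlist into the DMA engine and start the
 * hardware; the follow-up work is done in rk_ahash_crypto_rx().
 */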
static int rk_ahash_set_data_start(struct rk_crypto_dev *rk_dev, uint32_t flag)
{
	int err;
	struct rk_alg_ctx *alg_ctx = rk_ahash_alg_ctx(rk_dev);

	CRYPTO_TRACE();

	err = rk_dev->load_data(rk_dev, alg_ctx->sg_src, alg_ctx->sg_dst);
	if (!err)
		err = alg_ctx->ops.hw_dma_start(rk_dev, flag);

	return err;
}

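/*
 * How many trailing bytes of an update must be withheld from the
 * hardware so that the submitted length stays a multiple of
 * RK_DMA_ALIGNMENT. The withheld tail is replayed at the head of the
 * next operation. For example, assuming a 64-byte alignment: with
 * old_len = 16 and nbytes = 100, total_len = 116, so 52 bytes are kept
 * back and 64 are submitted; an exact multiple keeps one full block so
 * a later final never runs with zero data.
 */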
static u32 rk_calc_lastc_new_len(u32 nbytes, u32 old_len)
{
	u32 total_len = nbytes + old_len;

	if (total_len <= RK_DMA_ALIGNMENT)
		return nbytes;

	if (total_len % RK_DMA_ALIGNMENT)
		return total_len % RK_DMA_ALIGNMENT;

	return RK_DMA_ALIGNMENT;
}

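/*
 * One-shot digest through a software ahash implementation. Used to
 * shorten over-long HMAC keys in rk_ahash_hmac_setkey() and to produce
 * the digest of an empty message in rk_ahash_get_zero_result().
 */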
static int rk_ahash_fallback_digest(const char *alg_name, bool is_hmac,
				    const u8 *key, u32 key_len,
				    const u8 *msg, u32 msg_len,
				    u8 *digest)
{
	struct crypto_ahash *ahash_tfm;
	struct ahash_request *req;
	struct crypto_wait wait;
	struct scatterlist sg;
	int ret;

	CRYPTO_TRACE("%s, is_hmac = %d, key_len = %u, msg_len = %u",
		     alg_name, is_hmac, key_len, msg_len);

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(ahash_tfm);
		return -ENOMEM;
	}

	crypto_init_wait(&wait);

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	crypto_ahash_clear_flags(ahash_tfm, ~0);

	sg_init_one(&sg, msg, msg_len);
	ahash_request_set_crypt(req, &sg, digest, msg_len);

	if (is_hmac) {
		ret = crypto_ahash_setkey(ahash_tfm, key, key_len);
		if (ret) {
			CRYPTO_MSG("setkey failed, ret = %d", ret);
			goto exit;
		}
	}

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
	if (ret)
		CRYPTO_MSG("digest failed, ret = %d", ret);

exit:
	ahash_request_free(req);
	crypto_free_ahash(ahash_tfm);

	return ret;
}

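/*
 * The DMA path cannot process a zero-length request, so the digest of
 * an empty message (and the matching HMAC) comes from the fallback.
 */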
static int rk_ahash_get_zero_result(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_crypto_algt *algt = rk_ahash_get_algt(tfm);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	return rk_ahash_fallback_digest(crypto_ahash_alg_name(tfm),
					algt->type == ALG_TYPE_HMAC,
					ctx->authkey, ctx->authkey_len,
					NULL, 0, req->result);
}

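/*
 * Per RFC 2104, keys up to the block size are used as-is (zero padded
 * by the memset below); longer keys are first reduced to their digest
 * via the software fallback.
 */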
int rk_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen)
{
	unsigned int blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	struct rk_crypto_algt *algt = rk_ahash_get_algt(tfm);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	const char *alg_name;
	int ret = 0;

	CRYPTO_MSG();

	if (algt->algo >= ARRAY_SIZE(hash_algo2name)) {
		CRYPTO_MSG("hash algo %d invalid\n", algt->algo);
		return -EINVAL;
	}

	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		ctx->authkey_len = keylen;
		goto exit;
	}

	alg_name = hash_algo2name[algt->algo];

	CRYPTO_TRACE("calc key digest %s", alg_name);

	ret = rk_ahash_fallback_digest(alg_name, false, NULL, 0, key, keylen,
				       ctx->authkey);
	if (ret) {
		CRYPTO_MSG("rk_ahash_fallback_digest error ret = %d\n", ret);
		goto exit;
	}

	ctx->authkey_len = crypto_ahash_digestsize(tfm);
exit:
	return ret;
}

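/*
 * ahash entry points. init clears all buffered state; update, final and
 * finup only tag the request with RK_FLAG_UPDATE/RK_FLAG_FINAL and
 * enqueue it; the actual hashing is driven by rk_ahash_start() and
 * rk_ahash_crypto_rx() from the device queue.
 */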
int rk_ahash_init(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	CRYPTO_TRACE();

	memset(rctx, 0x00, sizeof(*rctx));
	rk_ahash_ctx_clear(ctx);

	return 0;
}

int rk_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct rk_crypto_dev *rk_dev = ctx->rk_dev;

	CRYPTO_TRACE("nbytes = %u", req->nbytes);

	memset(rctx, 0x00, sizeof(*rctx));

	rctx->flag = RK_FLAG_UPDATE;

	return rk_dev->enqueue(rk_dev, &req->base);
}

int rk_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct rk_crypto_dev *rk_dev = ctx->rk_dev;

	CRYPTO_TRACE();

	memset(rctx, 0x00, sizeof(*rctx));

	rctx->flag = RK_FLAG_FINAL;

	/* zero-length message, use the software fallback */
	if (ctx->calc_cnt == 0 &&
	    ctx->hash_tmp_len == 0 &&
	    ctx->lastc_len == 0) {
		CRYPTO_TRACE("use fallback hash");
		return rk_ahash_get_zero_result(req);
	}

	return rk_dev->enqueue(rk_dev, &req->base);
}

int rk_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct rk_crypto_dev *rk_dev = ctx->rk_dev;

	CRYPTO_TRACE("nbytes = %u", req->nbytes);

	memset(rctx, 0x00, sizeof(*rctx));

	rctx->flag = RK_FLAG_UPDATE | RK_FLAG_FINAL;

	/* zero-length message overall, use the software fallback */
	if (req->nbytes == 0 &&
	    ctx->calc_cnt == 0 &&
	    ctx->hash_tmp_len == 0 &&
	    ctx->lastc_len == 0) {
		CRYPTO_TRACE("use fallback hash");
		return rk_ahash_get_zero_result(req);
	}

	return rk_dev->enqueue(rk_dev, &req->base);
}

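/* One-shot digest: init then finup; the ?: keeps init's error if it fails. */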
int rk_ahash_digest(struct ahash_request *req)
{
	CRYPTO_TRACE("calc data %u bytes.", req->nbytes);

	return rk_ahash_init(req) ?: rk_ahash_finup(req);
}

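/*
 * Queue worker for hash requests. Any bytes buffered by earlier updates
 * (hash_tmp, plus the withheld lastc tail) are stitched in front of
 * req->src through a chained scatterlist, a new unaligned tail is saved
 * for next time, and the hardware is programmed. An update that still
 * totals less than RK_DMA_ALIGNMENT bytes is simply re-buffered.
 */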
int rk_ahash_start(struct rk_crypto_dev *rk_dev)
{
	struct ahash_request *req = ahash_request_cast(rk_dev->async_req);
	struct rk_alg_ctx *alg_ctx = rk_ahash_alg_ctx(rk_dev);
	struct rk_ahash_ctx *ctx = rk_ahash_ctx_cast(rk_dev);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_crypto_algt *algt = rk_ahash_get_algt(tfm);
	struct scatterlist *src_sg;
	unsigned int nbytes;
	int ret = 0;

	CRYPTO_TRACE("origin: old_len = %u, new_len = %u, nbytes = %u, flag = %d",
		     ctx->hash_tmp_len, ctx->lastc_len, req->nbytes, rctx->flag);

	/* a zero-byte update is a no-op */
	if (req->nbytes == 0 && !(rctx->flag & RK_FLAG_FINAL))
		goto no_calc;

	if (ctx->lastc_len) {
		/* move the tail saved last time to the head of this calculation */
		memcpy(ctx->hash_tmp + ctx->hash_tmp_len, ctx->lastc, ctx->lastc_len);
		ctx->hash_tmp_len = ctx->hash_tmp_len + ctx->lastc_len;
		ctx->lastc_len = 0;
	}

	CRYPTO_TRACE("hash_tmp_len = %u", ctx->hash_tmp_len);

	/* finup consumes everything now, so no new tail needs saving */
	if ((rctx->flag & RK_FLAG_UPDATE) && (rctx->flag & RK_FLAG_FINAL)) {
		nbytes = req->nbytes + ctx->hash_tmp_len;

		CRYPTO_TRACE("finup %u bytes", nbytes);
	} else if (rctx->flag & RK_FLAG_UPDATE) {
		ctx->lastc_len = rk_calc_lastc_new_len(req->nbytes, ctx->hash_tmp_len);

		CRYPTO_TRACE("nents = %u, ctx->lastc_len = %u, offset = %u",
			sg_nents_for_len(req->src, req->nbytes), ctx->lastc_len,
			req->nbytes - ctx->lastc_len);

		if (!sg_pcopy_to_buffer(req->src, sg_nents_for_len(req->src, req->nbytes),
			  ctx->lastc, ctx->lastc_len, req->nbytes - ctx->lastc_len)) {
			ret = -EINVAL;
			goto exit;
		}

		nbytes = ctx->hash_tmp_len + req->nbytes - ctx->lastc_len;

		/* not enough data for the hardware, buffer it all for later */
		if (nbytes < RK_DMA_ALIGNMENT) {
			CRYPTO_TRACE("nbytes = %u, not enough data", nbytes);
			memcpy(ctx->hash_tmp + ctx->hash_tmp_len,
			       ctx->lastc, ctx->lastc_len);
			ctx->hash_tmp_len = ctx->hash_tmp_len + ctx->lastc_len;
			ctx->lastc_len = 0;
			goto no_calc;
		}

		CRYPTO_TRACE("update nbytes = %u", nbytes);
	} else {
		/* final: only the previously buffered bytes are left to hash */
		nbytes = ctx->hash_tmp_len;

		CRYPTO_TRACE("final nbytes = %u", nbytes);
	}

	if (ctx->hash_tmp_len) {
		/* prepend the buffered data by chaining it ahead of req->src */
		sg_init_table(ctx->hash_sg, ARRAY_SIZE(ctx->hash_sg));
		sg_set_buf(ctx->hash_sg, ctx->hash_tmp, ctx->hash_tmp_len);

		if (rk_crypto_check_dmafd(req->src, sg_nents_for_len(req->src, req->nbytes))) {
			CRYPTO_TRACE("is hash dmafd");
			if (!dma_map_sg(rk_dev->dev, &ctx->hash_sg[0], 1, DMA_TO_DEVICE)) {
				dev_err(rk_dev->dev, "[%s:%d] dma_map_sg(hash_sg) error\n",
					__func__, __LINE__);
				ret = -ENOMEM;
				goto exit;
			}
			ctx->hash_tmp_mapped = true;
		}

		sg_chain(ctx->hash_sg, ARRAY_SIZE(ctx->hash_sg), req->src);

		src_sg = &ctx->hash_sg[0];
		ctx->hash_tmp_len = 0;
	} else {
		src_sg = req->src;
	}

	alg_ctx->total      = nbytes;
	alg_ctx->left_bytes = nbytes;
	alg_ctx->sg_src     = src_sg;
	alg_ctx->req_src    = src_sg;
	alg_ctx->src_nents  = sg_nents_for_len(src_sg, nbytes);

	CRYPTO_TRACE("adjust: old_len = %u, new_len = %u, nbytes = %u",
		     ctx->hash_tmp_len, ctx->lastc_len, nbytes);

	if (nbytes) {
		if (ctx->calc_cnt == 0)
			alg_ctx->ops.hw_init(rk_dev, algt->algo, algt->type);

		/* write the whole key buffer so stale HMAC key bytes are flushed */
		alg_ctx->ops.hw_write_key(ctx->rk_dev, ctx->authkey, sizeof(ctx->authkey));
		ret = rk_ahash_set_data_start(rk_dev, rctx->flag);
	}
exit:
	return ret;
no_calc:
	CRYPTO_TRACE("no calc");
	rk_alg_ctx_clear(alg_ctx);

	return 0;
}

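/*
 * DMA completion path. Accounts the bytes just consumed, feeds the next
 * scatterlist chunk to the hardware if any are left, and on the last
 * chunk of a RK_FLAG_FINAL request reads the digest back into
 * req->result.
 */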
int rk_ahash_crypto_rx(struct rk_crypto_dev *rk_dev)
{
	int err = 0;
	struct ahash_request *req = ahash_request_cast(rk_dev->async_req);
	struct rk_alg_ctx *alg_ctx = rk_ahash_alg_ctx(rk_dev);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct rk_ahash_ctx *ctx = rk_ahash_ctx_cast(rk_dev);

	CRYPTO_TRACE("left bytes = %u, flag = %d", alg_ctx->left_bytes, rctx->flag);

	err = rk_dev->unload_data(rk_dev);
	if (err)
		goto out_rx;

	ctx->calc_cnt += alg_ctx->count;

	if (alg_ctx->left_bytes) {
		if (alg_ctx->aligned) {
			if (sg_is_last(alg_ctx->sg_src)) {
				dev_warn(rk_dev->dev, "[%s:%d], Lack of data\n",
					 __func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			alg_ctx->sg_src = sg_next(alg_ctx->sg_src);
		}
		err = rk_ahash_set_data_start(rk_dev, rctx->flag);
	} else {
		/*
		 * it will take some time to process data after the last dma
		 * transmission.
		 */
		struct crypto_ahash *tfm;

		if (ctx->hash_tmp_mapped)
			dma_unmap_sg(rk_dev->dev, &ctx->hash_sg[0], 1, DMA_TO_DEVICE);

		/* only final will get the result */
		if (!(rctx->flag & RK_FLAG_FINAL))
			goto out_rx;

		if (!req->result) {
			err = -EINVAL;
			goto out_rx;
		}

		tfm = crypto_ahash_reqtfm(req);

		err = alg_ctx->ops.hw_get_result(rk_dev, req->result,
						 crypto_ahash_digestsize(tfm));
	}

out_rx:
	return err;
}