xref: /OK3568_Linux_fs/kernel/drivers/crypto/marvell/cesa/hash.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

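/*
 * Initialize the DMA iterator for a hash request. For non-final requests
 * the length is rounded down to a multiple of the hash block size: the
 * remaining partial block stays in the software cache and is prepended
 * to the next request.
 */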
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

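/*
 * MD5, SHA-1 and SHA-256 all use 64-byte blocks and reserve the last 8
 * bytes of the final block for the message length in bits, so the
 * message is padded until its length is congruent to 56 mod 64. For
 * example, with 50 bytes in the last block, padlen = 56 - 50 = 6; with
 * 60 bytes the length field no longer fits, so a whole extra block is
 * needed: padlen = 64 + 56 - 60 = 60.
 */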
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

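/*
 * Write the standard 0x80 + zero padding followed by the bit length of
 * the message. MD5 stores the length little endian, the SHA algorithms
 * big endian, hence the algo_le switch. Returns the total trailer size
 * (padding plus the 8-byte length field).
 */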
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

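/*
 * One step of the CPU-driven (non-TDMA) path: copy the operation
 * descriptor and any cached partial block into the engine SRAM, pull as
 * much payload as fits from the source scatterlist, adjust the fragment
 * mode (first/mid/last/not-fragmented) for this chunk, then kick the
 * accelerator. Data that does not fill a complete block is carried over
 * in creq->cache for the next step.
 */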
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				i = mv_cesa_ahash_pad_req(creq, creq->cache);
				len += i;
				memcpy_toio(engine->sram + len +
					    CESA_SA_DATA_SRAM_OFFSET,
					    creq->cache, i);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * The result is already in the correct endianness when the
		 * SA is used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = le32_to_cpu(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * The hardware's MD5 digest is in little endian
			 * format, but the SHA digests are in big endian
			 * format.
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

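/*
 * Buffer the request in creq->cache when it cannot fill a complete hash
 * block and is not the final request: the hardware is only ever handed
 * whole blocks (except for the last fragment), so short updates are
 * accumulated here and reported as "cached" without touching the engine.
 */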
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}

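/*
 * Append an operation descriptor covering frag_len bytes to the TDMA
 * chain, followed by a dummy "launch" descriptor that triggers the
 * engine. Once the first fragment has been emitted, the template is
 * switched to mid-fragment mode for all subsequent chunks.
 */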
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

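/*
 * Terminate the TDMA chain for the final request. Two cases: if the
 * total length fits in the descriptor's MAC length field and there is
 * outstanding data, let the engine generate the padding itself and copy
 * the digest back through a TDMA result descriptor; otherwise build the
 * padding in software and feed it to the engine as ordinary mid-fragment
 * data, possibly split across two SRAM payloads.
 */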
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

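/*
 * Build the complete TDMA descriptor chain for one request: map the
 * source scatterlist, transfer any cached partial block into SRAM, emit
 * transfer/op/launch descriptor groups for each SRAM-sized chunk, then
 * queue the final operation (padded, or left open for a follow-up
 * update). The flags set on the first and last descriptors tell the
 * step logic whether IVDIG must be programmed and whether the chain may
 * be linked with other requests at the DMA level.
 */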
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation.  Queue up the final
	 * operation, which depends on whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}

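/*
 * Export the partial hash state in the generic crypto API layout: the
 * intermediate digest words, the total number of bytes hashed so far,
 * and the cached partial block (zero-padded to a full block).
 */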
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

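/*
 * A minimal sketch (not part of this driver) of how a kernel user would
 * exercise one of these transforms through the generic ahash API; the
 * names and buffers below are illustrative only, and error handling and
 * cleanup (ahash_request_free(), crypto_free_ahash()) are omitted:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("md5", 0, 0);
 *	struct ahash_request *r = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_callback(r, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(r, &sg, digest, datalen);
 *	crypto_wait_req(crypto_ahash_digest(r), &wait);
 *
 * With cra_priority 300, these implementations take precedence over the
 * lower-priority generic software ones when the CESA hardware is present.
 */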
static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

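/*
 * Hash a single ipad/opad block synchronously with the underlying ahash
 * transform and export the resulting intermediate state. The exported
 * states become the precomputed inner/outer IVs used by the HMAC
 * transforms.
 */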
mv_cesa_ahmac_iv_state_init(struct ahash_request * req,u8 * pad,void * state,unsigned int blocksize)1100*4882a593Smuzhiyun static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
1101*4882a593Smuzhiyun 				       void *state, unsigned int blocksize)
1102*4882a593Smuzhiyun {
1103*4882a593Smuzhiyun 	struct mv_cesa_ahash_result result;
1104*4882a593Smuzhiyun 	struct scatterlist sg;
1105*4882a593Smuzhiyun 	int ret;
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1108*4882a593Smuzhiyun 				   mv_cesa_hmac_ahash_complete, &result);
1109*4882a593Smuzhiyun 	sg_init_one(&sg, pad, blocksize);
1110*4882a593Smuzhiyun 	ahash_request_set_crypt(req, &sg, pad, blocksize);
1111*4882a593Smuzhiyun 	init_completion(&result.completion);
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	ret = crypto_ahash_init(req);
1114*4882a593Smuzhiyun 	if (ret)
1115*4882a593Smuzhiyun 		return ret;
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	ret = crypto_ahash_update(req);
1118*4882a593Smuzhiyun 	if (ret && ret != -EINPROGRESS)
1119*4882a593Smuzhiyun 		return ret;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	wait_for_completion_interruptible(&result.completion);
1122*4882a593Smuzhiyun 	if (result.error)
1123*4882a593Smuzhiyun 		return result.error;
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	ret = crypto_ahash_export(req, state);
1126*4882a593Smuzhiyun 	if (ret)
1127*4882a593Smuzhiyun 		return ret;
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun 	return 0;
1130*4882a593Smuzhiyun }
1131*4882a593Smuzhiyun 
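/*
 * Derive the HMAC inner and outer pads per RFC 2104: a key longer than
 * the block size is first digested down to the digest size, the
 * (possibly shortened) key is zero-padded to a full block, copied, and
 * XORed with the 0x36 (ipad) and 0x5c (opad) constants.
 */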
1132*4882a593Smuzhiyun static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
1133*4882a593Smuzhiyun 				  const u8 *key, unsigned int keylen,
1134*4882a593Smuzhiyun 				  u8 *ipad, u8 *opad,
1135*4882a593Smuzhiyun 				  unsigned int blocksize)
1136*4882a593Smuzhiyun {
1137*4882a593Smuzhiyun 	struct mv_cesa_ahash_result result;
1138*4882a593Smuzhiyun 	struct scatterlist sg;
1139*4882a593Smuzhiyun 	int ret;
1140*4882a593Smuzhiyun 	int i;
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	if (keylen <= blocksize) {
1143*4882a593Smuzhiyun 		memcpy(ipad, key, keylen);
1144*4882a593Smuzhiyun 	} else {
1145*4882a593Smuzhiyun 		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 		if (!keydup)
1148*4882a593Smuzhiyun 			return -ENOMEM;
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1151*4882a593Smuzhiyun 					   mv_cesa_hmac_ahash_complete,
1152*4882a593Smuzhiyun 					   &result);
1153*4882a593Smuzhiyun 		sg_init_one(&sg, keydup, keylen);
1154*4882a593Smuzhiyun 		ahash_request_set_crypt(req, &sg, ipad, keylen);
1155*4882a593Smuzhiyun 		init_completion(&result.completion);
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 		ret = crypto_ahash_digest(req);
1158*4882a593Smuzhiyun 		if (ret == -EINPROGRESS) {
1159*4882a593Smuzhiyun 			wait_for_completion_interruptible(&result.completion);
1160*4882a593Smuzhiyun 			ret = result.error;
1161*4882a593Smuzhiyun 		}
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	/* Zeroize the key copy before freeing to avoid leaking key material. */
1164*4882a593Smuzhiyun 		kfree_sensitive(keydup);
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 		if (ret)
1167*4882a593Smuzhiyun 			return ret;
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1170*4882a593Smuzhiyun 	}
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	memset(ipad + keylen, 0, blocksize - keylen);
1173*4882a593Smuzhiyun 	memcpy(opad, ipad, blocksize);
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	for (i = 0; i < blocksize; i++) {
1176*4882a593Smuzhiyun 		ipad[i] ^= HMAC_IPAD_VALUE;
1177*4882a593Smuzhiyun 		opad[i] ^= HMAC_OPAD_VALUE;
1178*4882a593Smuzhiyun 	}
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	return 0;
1181*4882a593Smuzhiyun }
1182*4882a593Smuzhiyun 
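/*
 * Common HMAC setkey path: run the pad derivation and the two one-block
 * partial hashes on this driver's own "mv-*" ahash implementation, then
 * return the exported inner/outer intermediate states to the caller.
 */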
1183*4882a593Smuzhiyun static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
1184*4882a593Smuzhiyun 				const u8 *key, unsigned int keylen,
1185*4882a593Smuzhiyun 				void *istate, void *ostate)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun 	struct ahash_request *req;
1188*4882a593Smuzhiyun 	struct crypto_ahash *tfm;
1189*4882a593Smuzhiyun 	unsigned int blocksize;
1190*4882a593Smuzhiyun 	u8 *ipad = NULL;
1191*4882a593Smuzhiyun 	u8 *opad;
1192*4882a593Smuzhiyun 	int ret;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
1195*4882a593Smuzhiyun 	if (IS_ERR(tfm))
1196*4882a593Smuzhiyun 		return PTR_ERR(tfm);
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	req = ahash_request_alloc(tfm, GFP_KERNEL);
1199*4882a593Smuzhiyun 	if (!req) {
1200*4882a593Smuzhiyun 		ret = -ENOMEM;
1201*4882a593Smuzhiyun 		goto free_ahash;
1202*4882a593Smuzhiyun 	}
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	crypto_ahash_clear_flags(tfm, ~0);
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	ipad = kcalloc(2, blocksize, GFP_KERNEL);
1209*4882a593Smuzhiyun 	if (!ipad) {
1210*4882a593Smuzhiyun 		ret = -ENOMEM;
1211*4882a593Smuzhiyun 		goto free_req;
1212*4882a593Smuzhiyun 	}
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	opad = ipad + blocksize;
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
1217*4882a593Smuzhiyun 	if (ret)
1218*4882a593Smuzhiyun 		goto free_ipad;
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
1221*4882a593Smuzhiyun 	if (ret)
1222*4882a593Smuzhiyun 		goto free_ipad;
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun free_ipad:
1227*4882a593Smuzhiyun 	kfree(ipad);
1228*4882a593Smuzhiyun free_req:
1229*4882a593Smuzhiyun 	ahash_request_free(req);
1230*4882a593Smuzhiyun free_ahash:
1231*4882a593Smuzhiyun 	crypto_free_ahash(tfm);
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	return ret;
1234*4882a593Smuzhiyun }
1235*4882a593Smuzhiyun 
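/*
 * Common tfm init for all HMAC variants: wire up the ahash request ops
 * and reserve the per-request context space.
 */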
1236*4882a593Smuzhiyun static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
1237*4882a593Smuzhiyun {
1238*4882a593Smuzhiyun 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	ctx->base.ops = &mv_cesa_ahash_req_ops;
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1243*4882a593Smuzhiyun 				 sizeof(struct mv_cesa_ahash_req));
1244*4882a593Smuzhiyun 	return 0;
1245*4882a593Smuzhiyun }
1246*4882a593Smuzhiyun 
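/*
 * MD5 is the only supported algorithm that encodes the message length
 * in little-endian order, hence the 'true' third argument below; the
 * SHA variants pass 'false'.
 */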
1247*4882a593Smuzhiyun static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
1248*4882a593Smuzhiyun {
1249*4882a593Smuzhiyun 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1250*4882a593Smuzhiyun 	struct mv_cesa_op_ctx tmpl = { };
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
1253*4882a593Smuzhiyun 	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	mv_cesa_ahash_init(req, &tmpl, true);
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 	return 0;
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun 
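/*
 * The per-algorithm setkey helpers below store the precomputed states
 * as big-endian words in the template IV, inner-hash state first and
 * outer-hash state starting at word 8 of ctx->iv.
 */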
1260*4882a593Smuzhiyun static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
1261*4882a593Smuzhiyun 				    unsigned int keylen)
1262*4882a593Smuzhiyun {
1263*4882a593Smuzhiyun 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1264*4882a593Smuzhiyun 	struct md5_state istate, ostate;
1265*4882a593Smuzhiyun 	int ret, i;
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
1268*4882a593Smuzhiyun 	if (ret)
1269*4882a593Smuzhiyun 		return ret;
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
1272*4882a593Smuzhiyun 		ctx->iv[i] = cpu_to_be32(istate.hash[i]);
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
1275*4882a593Smuzhiyun 		ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	return 0;
1278*4882a593Smuzhiyun }
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
1281*4882a593Smuzhiyun {
1282*4882a593Smuzhiyun 	int ret;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	ret = mv_cesa_ahmac_md5_init(req);
1285*4882a593Smuzhiyun 	if (ret)
1286*4882a593Smuzhiyun 		return ret;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	return mv_cesa_ahash_finup(req);
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun struct ahash_alg mv_ahmac_md5_alg = {
1292*4882a593Smuzhiyun 	.init = mv_cesa_ahmac_md5_init,
1293*4882a593Smuzhiyun 	.update = mv_cesa_ahash_update,
1294*4882a593Smuzhiyun 	.final = mv_cesa_ahash_final,
1295*4882a593Smuzhiyun 	.finup = mv_cesa_ahash_finup,
1296*4882a593Smuzhiyun 	.digest = mv_cesa_ahmac_md5_digest,
1297*4882a593Smuzhiyun 	.setkey = mv_cesa_ahmac_md5_setkey,
1298*4882a593Smuzhiyun 	.export = mv_cesa_md5_export,
1299*4882a593Smuzhiyun 	.import = mv_cesa_md5_import,
1300*4882a593Smuzhiyun 	.halg = {
1301*4882a593Smuzhiyun 		.digestsize = MD5_DIGEST_SIZE,
1302*4882a593Smuzhiyun 		.statesize = sizeof(struct md5_state),
1303*4882a593Smuzhiyun 		.base = {
1304*4882a593Smuzhiyun 			.cra_name = "hmac(md5)",
1305*4882a593Smuzhiyun 			.cra_driver_name = "mv-hmac-md5",
1306*4882a593Smuzhiyun 			.cra_priority = 300,
1307*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_ASYNC |
1308*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY |
1309*4882a593Smuzhiyun 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
1310*4882a593Smuzhiyun 			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1311*4882a593Smuzhiyun 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
1312*4882a593Smuzhiyun 			.cra_init = mv_cesa_ahmac_cra_init,
1313*4882a593Smuzhiyun 			.cra_module = THIS_MODULE,
1314*4882a593Smuzhiyun 		}
1315*4882a593Smuzhiyun 	}
1316*4882a593Smuzhiyun };
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
1319*4882a593Smuzhiyun {
1320*4882a593Smuzhiyun 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1321*4882a593Smuzhiyun 	struct mv_cesa_op_ctx tmpl = { };
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
1324*4882a593Smuzhiyun 	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	mv_cesa_ahash_init(req, &tmpl, false);
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	return 0;
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
1332*4882a593Smuzhiyun 				     unsigned int keylen)
1333*4882a593Smuzhiyun {
1334*4882a593Smuzhiyun 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1335*4882a593Smuzhiyun 	struct sha1_state istate, ostate;
1336*4882a593Smuzhiyun 	int ret, i;
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
1339*4882a593Smuzhiyun 	if (ret)
1340*4882a593Smuzhiyun 		return ret;
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
1343*4882a593Smuzhiyun 		ctx->iv[i] = cpu_to_be32(istate.state[i]);
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
1346*4882a593Smuzhiyun 		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	return 0;
1349*4882a593Smuzhiyun }
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
1352*4882a593Smuzhiyun {
1353*4882a593Smuzhiyun 	int ret;
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 	ret = mv_cesa_ahmac_sha1_init(req);
1356*4882a593Smuzhiyun 	if (ret)
1357*4882a593Smuzhiyun 		return ret;
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun 	return mv_cesa_ahash_finup(req);
1360*4882a593Smuzhiyun }
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun struct ahash_alg mv_ahmac_sha1_alg = {
1363*4882a593Smuzhiyun 	.init = mv_cesa_ahmac_sha1_init,
1364*4882a593Smuzhiyun 	.update = mv_cesa_ahash_update,
1365*4882a593Smuzhiyun 	.final = mv_cesa_ahash_final,
1366*4882a593Smuzhiyun 	.finup = mv_cesa_ahash_finup,
1367*4882a593Smuzhiyun 	.digest = mv_cesa_ahmac_sha1_digest,
1368*4882a593Smuzhiyun 	.setkey = mv_cesa_ahmac_sha1_setkey,
1369*4882a593Smuzhiyun 	.export = mv_cesa_sha1_export,
1370*4882a593Smuzhiyun 	.import = mv_cesa_sha1_import,
1371*4882a593Smuzhiyun 	.halg = {
1372*4882a593Smuzhiyun 		.digestsize = SHA1_DIGEST_SIZE,
1373*4882a593Smuzhiyun 		.statesize = sizeof(struct sha1_state),
1374*4882a593Smuzhiyun 		.base = {
1375*4882a593Smuzhiyun 			.cra_name = "hmac(sha1)",
1376*4882a593Smuzhiyun 			.cra_driver_name = "mv-hmac-sha1",
1377*4882a593Smuzhiyun 			.cra_priority = 300,
1378*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_ASYNC |
1379*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY |
1380*4882a593Smuzhiyun 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
1381*4882a593Smuzhiyun 			.cra_blocksize = SHA1_BLOCK_SIZE,
1382*4882a593Smuzhiyun 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
1383*4882a593Smuzhiyun 			.cra_init = mv_cesa_ahmac_cra_init,
1384*4882a593Smuzhiyun 			.cra_module = THIS_MODULE,
1385*4882a593Smuzhiyun 		}
1386*4882a593Smuzhiyun 	}
1387*4882a593Smuzhiyun };
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
1390*4882a593Smuzhiyun 				       unsigned int keylen)
1391*4882a593Smuzhiyun {
1392*4882a593Smuzhiyun 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1393*4882a593Smuzhiyun 	struct sha256_state istate, ostate;
1394*4882a593Smuzhiyun 	int ret, i;
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
1397*4882a593Smuzhiyun 	if (ret)
1398*4882a593Smuzhiyun 		return ret;
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
1401*4882a593Smuzhiyun 		ctx->iv[i] = cpu_to_be32(istate.state[i]);
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
1404*4882a593Smuzhiyun 		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	return 0;
1407*4882a593Smuzhiyun }
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
1410*4882a593Smuzhiyun {
1411*4882a593Smuzhiyun 	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1412*4882a593Smuzhiyun 	struct mv_cesa_op_ctx tmpl = { };
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
1415*4882a593Smuzhiyun 	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	mv_cesa_ahash_init(req, &tmpl, false);
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	return 0;
1420*4882a593Smuzhiyun }
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
1423*4882a593Smuzhiyun {
1424*4882a593Smuzhiyun 	int ret;
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	ret = mv_cesa_ahmac_sha256_init(req);
1427*4882a593Smuzhiyun 	if (ret)
1428*4882a593Smuzhiyun 		return ret;
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 	return mv_cesa_ahash_finup(req);
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun struct ahash_alg mv_ahmac_sha256_alg = {
1434*4882a593Smuzhiyun 	.init = mv_cesa_ahmac_sha256_init,
1435*4882a593Smuzhiyun 	.update = mv_cesa_ahash_update,
1436*4882a593Smuzhiyun 	.final = mv_cesa_ahash_final,
1437*4882a593Smuzhiyun 	.finup = mv_cesa_ahash_finup,
1438*4882a593Smuzhiyun 	.digest = mv_cesa_ahmac_sha256_digest,
1439*4882a593Smuzhiyun 	.setkey = mv_cesa_ahmac_sha256_setkey,
1440*4882a593Smuzhiyun 	.export = mv_cesa_sha256_export,
1441*4882a593Smuzhiyun 	.import = mv_cesa_sha256_import,
1442*4882a593Smuzhiyun 	.halg = {
1443*4882a593Smuzhiyun 		.digestsize = SHA256_DIGEST_SIZE,
1444*4882a593Smuzhiyun 		.statesize = sizeof(struct sha256_state),
1445*4882a593Smuzhiyun 		.base = {
1446*4882a593Smuzhiyun 			.cra_name = "hmac(sha256)",
1447*4882a593Smuzhiyun 			.cra_driver_name = "mv-hmac-sha256",
1448*4882a593Smuzhiyun 			.cra_priority = 300,
1449*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_ASYNC |
1450*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY |
1451*4882a593Smuzhiyun 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
1452*4882a593Smuzhiyun 			.cra_blocksize = SHA256_BLOCK_SIZE,
1453*4882a593Smuzhiyun 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
1454*4882a593Smuzhiyun 			.cra_init = mv_cesa_ahmac_cra_init,
1455*4882a593Smuzhiyun 			.cra_module = THIS_MODULE,
1456*4882a593Smuzhiyun 		}
1457*4882a593Smuzhiyun 	}
1458*4882a593Smuzhiyun };
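
/*
 * Illustrative sketch only, not part of the driver: a minimal
 * kernel-side consumer of one of the algorithms registered above.  The
 * function name and parameters are hypothetical; it assumes this driver
 * is loaded so that "mv-hmac-sha256" backs "hmac(sha256)", and it uses
 * the standard crypto_wait_req() helper to run the request
 * synchronously.
 *
 *	#include <crypto/hash.h>
 *	#include <linux/crypto.h>
 *	#include <linux/scatterlist.h>
 *
 *	static int example_hmac_sha256(const u8 *key, unsigned int keylen,
 *				       struct scatterlist *sg,
 *				       unsigned int len, u8 *out)
 *	{
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int ret;
 *
 *		tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		ret = crypto_ahash_setkey(tfm, key, keylen);
 *		if (ret)
 *			goto out_free_tfm;
 *
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			ret = -ENOMEM;
 *			goto out_free_tfm;
 *		}
 *
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   crypto_req_done, &wait);
 *		ahash_request_set_crypt(req, sg, out, len);
 *
 *		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *		ahash_request_free(req);
 *	out_free_tfm:
 *		crypto_free_ahash(tfm);
 *		return ret;
 *	}
 */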