// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/sha3.h>

#include "util.h"
#include "cipher.h"
#include "spu.h"
#include "spum.h"
#include "spu2.h"

/* ================= Device Structure ================== */

struct bcm_device_private iproc_priv;

/* ==================== Parameters ===================== */

int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");

/*
 * The value of these module parameters is used to set the priority for each
 * algo type when this driver registers algos with the kernel crypto API.
 * To use a priority other than the default, set the priority on the insmod
 * or modprobe command line. Changing the module priority after init time has
 * no effect.
 *
 * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
 * algos, but more preferred than generic software algos.
 */
static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
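
/*
 * Illustrative usage only (the module name shown is an assumption; it depends
 * on how CONFIG_CRYPTO_DEV_BCM_SPU is built in this tree):
 *
 *   modprobe bcm_crypto_spu cipher_pri=300 hash_pri=50
 *
 * would make this driver's ciphers preferred over ARMv8 CE implementations
 * while demoting its hashes below most software alternatives.
 */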

/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
 * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
 * 0x60 - ring 0
 * 0x68 - ring 1
 * 0x70 - ring 2
 * 0x78 - ring 3
 */
static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
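
/*
 * For illustration (a sketch based on the table above, not a helper this
 * driver defines): the ring number can be recovered from the first header
 * byte as (hdr[0] >> 3) & 0x3, giving 0x60 -> 0, 0x68 -> 1, 0x70 -> 2,
 * 0x78 -> 3.
 */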
/*
 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
 * is set dynamically after reading SPU type from device tree.
 */
#define BCM_HDR_LEN  iproc_priv.bcm_hdr_len

/* min and max time to sleep before retrying when mbox queue is full. usec */
#define MBOX_SLEEP_MIN  800
#define MBOX_SLEEP_MAX 1000

/**
 * select_channel() - Select a SPU channel to handle a crypto request. Selects
 * channel in round robin order.
 *
 * Return:  channel index
 */
static u8 select_channel(void)
{
	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);

	return chan_idx % iproc_priv.spu.num_chan;
}
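
/*
 * Illustrative behavior (not additional driver code): with num_chan == 4 and
 * next_chan starting at 0, successive calls return 1, 2, 3, 0, 1, ... since
 * atomic_inc_return() yields the post-increment value, which is then reduced
 * modulo the channel count.
 */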

/**
 * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an skcipher request. Includes buffers to
 * catch SPU message headers and the response data.
 * @mssg:	mailbox message containing the receive sg
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @chunksize:	Number of bytes of response data expected
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Returns:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_skcipher_rx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 rx_frag_num,
			    unsigned int chunksize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
			   SPU_XTS_TWEAK_SIZE);

	/* Copy in each dst sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
				 rctx->dst_nents, chunksize);
	if (datalen < chunksize) {
		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
		       __func__, chunksize, datalen);
		return -EFAULT;
	}

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}

/**
 * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
 * send a SPU request message for an skcipher request. Includes SPU message
 * headers and the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @chunksize:	Number of bytes of request data
 * @pad_len:	Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Returns:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_skcipher_tx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (unlikely(!mssg->spu.src))
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + ctx->spu_req_hdr_len);

	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);

	/* Copy in each src sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
				 rctx->src_nents, chunksize);
	if (unlikely(datalen < chunksize)) {
		pr_err("%s(): failed to copy src sg to mbox msg",
		       __func__);
		return -EFAULT;
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}
static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
				u8 chan_idx)
{
	int err;
	int retry_cnt = 0;
	struct device *dev = &(iproc_priv.pdev->dev);

	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	/* Check error returned by mailbox controller */
	err = mssg->error;
	if (unlikely(err < 0))
		dev_err(dev, "message error %d", err);

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
	return err;
}
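
/*
 * Worked bound (illustrative): with MBOX_SLEEP_MIN/MAX of 800/1000 usec, each
 * retry sleeps roughly 1 ms, so a persistently full queue blocks a MAY_SLEEP
 * caller for at most about SPU_MB_RETRY_MAX milliseconds (SPU_MB_RETRY_MAX is
 * defined elsewhere in this driver) before the send fails with -ENOBUFS.
 */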

/**
 * handle_skcipher_req() - Submit as much of a block cipher request as fits in
 * a single SPU request message, starting at the current position in the
 * request data.
 * @rctx:	Crypto request context
 *
 * This may be called on the crypto API thread, or, when a request is so large
 * it must be broken into multiple SPU messages, on the thread used to invoke
 * the response callback. When requests are broken into multiple SPU
 * messages, we assume subsequent messages depend on previous results, and
 * thus always wait for previous results before submitting the next message.
 * Because requests are submitted in lock step like this, there is no need
 * to synchronize access to request data structures.
 *
 * Return: -EINPROGRESS: request has been accepted and result will be returned
 *			 asynchronously
 *         Any other value indicates an error
 */
static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct skcipher_request *req =
	    container_of(areq, struct skcipher_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	struct spu_cipher_parms cipher_parms;
	int err;
	unsigned int chunksize;	/* Num bytes of request to submit */
	int remaining;	/* Bytes of request still to process */
	int chunk_start;	/* Beginning of data for current SPU msg */

	/* IV or ctr value to use in this SPU msg */
	u8 local_iv_ctr[MAX_IV_SIZE];
	u32 stat_pad_len;	/* num bytes to align status field */
	u32 pad_len;		/* total length of all padding */
	struct brcm_message *mssg;	/* mailbox message */

	/* number of entries in src and dst sg in mailbox message. */
	u8 rx_frag_num = 2;	/* response header and STATUS */
	u8 tx_frag_num = 1;	/* request header */

	flow_log("%s\n", __func__);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.iv_buf = local_iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;
	remaining = rctx->total_todo - chunk_start;

	/* determine the chunk we are breaking off and update the indexes */
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (remaining > ctx->max_payload))
		chunksize = ctx->max_payload;
	else
		chunksize = remaining;

	rctx->src_sent += chunksize;
	rctx->total_sent = rctx->src_sent;

	/* Count number of sg entries to be included in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);

	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
	    rctx->is_encrypt && chunk_start)
		/*
		 * Encrypting non-first chunk. Copy last block of
		 * previous result to IV for this chunk.
		 */
		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
				    rctx->iv_ctr_len,
				    chunk_start - rctx->iv_ctr_len);

	if (rctx->iv_ctr_len) {
		/* get our local copy of the iv */
		memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
		       rctx->iv_ctr_len);

		/* generate the next IV if possible */
		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
		    !rctx->is_encrypt) {
			/*
			 * CBC Decrypt: next IV is the last ciphertext block in
			 * this chunk
			 */
			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
					    rctx->iv_ctr_len,
					    rctx->src_sent - rctx->iv_ctr_len);
		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
			/*
			 * The SPU hardware increments the counter once for
			 * each AES block of 16 bytes. So update the counter
			 * for the next chunk, if there is one. Note that for
			 * this chunk, the counter has already been copied to
			 * local_iv_ctr. We can assume a block size of 16,
			 * because we only support CTR mode for AES, not for
			 * any other cipher alg.
			 */
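			/*
			 * Example: a 4096-byte chunk covers 4096 / 16 = 256
			 * AES blocks, so the counter advances by
			 * chunksize >> 4 = 256 for the next chunk.
			 */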
			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
		}
	}

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("sent:%u start:%u remains:%u size:%u\n",
		 rctx->src_sent, chunk_start, remaining, chunksize);

	/* Copy SPU header template created at setkey time */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));

	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
				   &cipher_parms, chunksize);

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
				     0, ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      ctx->spu_req_hdr_len);
	packet_log("payload:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	rx_frag_num += rctx->dst_nents;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		rx_frag_num++;	/* extra sg to insert tweak */

	err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
					  stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		tx_frag_num++;	/* extra sg to insert tweak */

	err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
					  pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}
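
/*
 * Illustrative chunking (not driver code): with total_todo = 64 KiB and
 * ctx->max_payload = 16 KiB, handle_skcipher_req() runs four times in lock
 * step, once from the crypto API thread and then three more times from the
 * response callback path, each submitting one 16 KiB chunk.
 */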

/**
 * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
 * total received count for the request and updates global stats.
 * @rctx:	Crypto request context
 */
static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
#ifdef DEBUG
	struct crypto_async_request *areq = rctx->parent;
	struct skcipher_request *req = skcipher_request_cast(areq);
#endif
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);

	/*
	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
	 * encrypted tweak ("i") value; we don't count those.
	 */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload() &&
	    (payload_len >= SPU_XTS_TWEAK_SIZE))
		payload_len -= SPU_XTS_TWEAK_SIZE;

	atomic64_add(payload_len, &iproc_priv.bytes_in);

	flow_log("%s() offset: %u, bd_len: %u BD:\n",
		 __func__, rctx->total_received, payload_len);

	dump_sg(req->dst, rctx->total_received, payload_len);

	rctx->total_received += payload_len;
	if (rctx->total_received == rctx->total_todo) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
		atomic_inc(
		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
	}
}

/**
 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ahash request.
 * @mssg:	mailbox message containing the receive sg
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @digestsize: length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ahash_rx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 rx_frag_num, unsigned int digestsize,
		       u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* Space for digest */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
	return 0;
}

/**
 * spu_ahash_tx_sg_create() -  Build up the scatterlist of buffers used to send
 * a SPU request message for an ahash request. Includes SPU message headers and
 * the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @spu_hdr_len: length in bytes of SPU message header
 * @hash_carry_len: Number of bytes of data carried over from previous req
 * @new_data_len: Number of bytes of new request data
 * @pad_len:	Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ahash_tx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 tx_frag_num,
		       u32 spu_hdr_len,
		       unsigned int hash_carry_len,
		       unsigned int new_data_len, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	u32 datalen;		/* Number of bytes of response data expected */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (hash_carry_len)
		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);

	if (new_data_len) {
		/* Copy in each src sg entry from request, up to chunksize */
		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, new_data_len);
		if (datalen < new_data_len) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}

	return 0;
}

/**
 * handle_ahash_req() - Process an asynchronous hash request from the crypto
 * API.
 * @rctx:  Crypto request context
 *
 * Builds a SPU request message embedded in a mailbox message and submits the
 * mailbox message on a selected mailbox channel. The SPU request message is
 * constructed as a scatterlist, including entries from the crypto API's
 * src scatterlist to avoid copying the data to be hashed. This function is
 * called either on the thread from the crypto API, or, in the case that the
 * crypto API request is too large to fit in a single SPU request message,
 * on the thread that invokes the receive callback with a response message.
 * Because some operations require the response from one chunk before the next
 * chunk can be submitted, we always wait for the response for the previous
 * chunk before submitting the next chunk. Because requests are submitted in
 * lock step like this, there is no need to synchronize access to request data
 * structures.
 *
 * Return:
 *   -EINPROGRESS: request has been submitted to SPU and response will be
 *		   returned asynchronously
 *   -EAGAIN:      non-final request included a small amount of data, which for
 *		   efficiency we did not submit to the SPU, but instead stored
 *		   to be submitted to the SPU with the next part of the request
 *   other:        an error code
 */
static int handle_ahash_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
	struct iproc_ctx_s *ctx = rctx->ctx;

	/* number of bytes still to be hashed in this req */
	unsigned int nbytes_to_hash = 0;
	int err;
	unsigned int chunksize = 0;	/* length of hash carry + new data */
	/*
	 * length of new data, not from hash carry, to be submitted in
	 * this hw request
	 */
	unsigned int new_data_len;

	unsigned int __maybe_unused chunk_start = 0;
	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	unsigned int local_nbuf;
	u32 spu_hdr_len;
	unsigned int digestsize;
	u16 rem = 0;

	/*
	 * number of entries in src and dst sg. Always includes SPU msg header.
	 * rx always includes a buffer to catch digest and STATUS.
	 */
	u8 rx_frag_num = 3;
	u8 tx_frag_num = 1;

	flow_log("total_todo %u, total_sent %u\n",
		 rctx->total_todo, rctx->total_sent);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&cipher_parms, 0, sizeof(cipher_parms));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.bd_suppress = true;
	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;

	/*
	 * For hash algorithms, the assignment below looks a bit odd, but it
	 * is needed for the AES-XCBC and AES-CMAC hash algorithms to
	 * differentiate between 128-, 192-, and 256-bit key values.
	 * Based on the key value, the hash algorithm is selected. For
	 * example, for a 128-bit key the hash algorithm is AES-128.
	 */
	cipher_parms.type = ctx->cipher_type;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;

	/*
	 * Compute the amount remaining to hash. This may include data
	 * carried over from previous requests.
	 */
	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
	chunksize = nbytes_to_hash;
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (chunksize > ctx->max_payload))
		chunksize = ctx->max_payload;

	/*
	 * If this is not a final request and the request data is not a multiple
	 * of a full block, then simply park the extra data and prefix it to the
	 * data for the next request.
	 */
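	/*
	 * Worked example (illustrative): for SHA-256, blocksize is 64. A
	 * non-final update of 100 bytes gives rem = 100 % 64 = 36, so only
	 * 64 bytes are submitted to hardware now; the trailing 36 bytes are
	 * eventually parked in hash_carry (returning -EAGAIN) and prefixed
	 * to the data of the next request.
	 */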
	if (!rctx->is_final) {
		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
		u16 new_len;  /* len of data to add to hash carry */

		rem = chunksize % blocksize;   /* remainder */
		if (rem) {
			/* chunksize not a multiple of blocksize */
			chunksize -= rem;
			if (chunksize == 0) {
				/* Don't have a full block to submit to hw */
				new_len = rem - rctx->hash_carry_len;
				sg_copy_part_to_buf(req->src, dest, new_len,
						    rctx->src_sent);
				rctx->hash_carry_len = rem;
				flow_log("Exiting with hash carry len: %u\n",
					 rctx->hash_carry_len);
				packet_dump("  buf: ",
					    rctx->hash_carry,
					    rctx->hash_carry_len);
				return -EAGAIN;
			}
		}
	}

	/* if we have hash carry, then prefix it to the data in this request */
	local_nbuf = rctx->hash_carry_len;
	rctx->hash_carry_len = 0;
	if (local_nbuf)
		tx_frag_num++;
	new_data_len = chunksize - local_nbuf;

	/* Count number of sg entries to be used in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
				       new_data_len);

	/* AES hashing keeps key size in type field, so need to copy it here */
	if (hash_parms.alg == HASH_ALG_AES)
		hash_parms.type = (enum hash_type)cipher_parms.type;
	else
		hash_parms.type = spu->spu_hash_type(rctx->total_sent);

	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
					  hash_parms.type);
	hash_parms.digestsize = digestsize;

	/* update the indexes */
	rctx->total_sent += chunksize;
	/* if you sent a prebuf then that wasn't from this req->src */
	rctx->src_sent += new_data_len;

	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
							   hash_parms.mode,
							   chunksize,
							   blocksize);

	/*
	 * If a non-first chunk, then include the digest returned from the
	 * previous chunk so that hw can add to it (except for AES types).
	 */
	if ((hash_parms.type == HASH_TYPE_UPDT) &&
	    (hash_parms.alg != HASH_ALG_AES)) {
		hash_parms.key_buf = rctx->incr_hash;
		hash_parms.key_len = digestsize;
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s() final: %u nbuf: %u ",
		 __func__, rctx->is_final, local_nbuf);

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);

	/* Prepend SPU header with type 3 BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	hash_parms.prebuf_len = local_nbuf;
	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN,
					      &req_opts, &cipher_parms,
					      &hash_parms, &aead_parms,
					      new_data_len);

	if (spu_hdr_len == 0) {
		pr_err("Failed to create SPU request header\n");
		return -EFAULT;
	}

	/*
	 * Determine total length of padding required. Put all padding in one
	 * buffer.
	 */
	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
				   0, 0, hash_parms.pad_len);
	if (spu->spu_tx_status_len())
		stat_pad_len = spu->spu_wordalign_padlen(db_size);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
				     hash_parms.pad_len, ctx->auth.alg,
				     ctx->auth.mode, rctx->total_sent,
				     stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	packet_dump("    prebuf: ", rctx->hash_carry, local_nbuf);
	flow_log("Data:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
				     stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				     local_nbuf, new_data_len, pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

/**
 * spu_hmac_outer_hash() - Request synchronous software compute of the outer
 * hash for an HMAC request.
 * @req:  The HMAC request from the crypto API
 * @ctx:  The session context
 *
 * Return: 0 if synchronous hash operation successful
 *         -EINVAL if the hash algo is unrecognized
 *         any other value indicates an error
 */
static int spu_hmac_outer_hash(struct ahash_request *req,
			       struct iproc_ctx_s *ctx)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	int rc;

	switch (ctx->auth.alg) {
	case HASH_ALG_MD5:
		rc = do_shash("md5", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA1:
		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA224:
		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA256:
		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA384:
		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA512:
		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	default:
		pr_err("%s() Error : unknown hmac type\n", __func__);
		rc = -EINVAL;
	}
	return rc;
}
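
/*
 * For reference, the call above implements the outer half of RFC 2104 HMAC:
 * HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)). On entry, req->result
 * holds the inner digest from the SPU, and ctx->opad is assumed to already
 * hold K' ^ opad as prepared by the driver's HMAC setkey path.
 */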

/**
 * ahash_req_done() - Process a hash result from the SPU hardware.
 * @rctx: Crypto request context
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int ahash_req_done(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;

	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);

	if (spu->spu_type == SPU_TYPE_SPUM) {
		/* byte swap the output from the UPDT function to network byte
		 * order
		 */
		if (ctx->auth.alg == HASH_ALG_MD5) {
			__swab32s((u32 *)req->result);
			__swab32s(((u32 *)req->result) + 1);
			__swab32s(((u32 *)req->result) + 2);
			__swab32s(((u32 *)req->result) + 3);
			__swab32s(((u32 *)req->result) + 4);
		}
	}

	flow_dump("  digest ", req->result, ctx->digestsize);

	/* if this is an HMAC then do the outer hash */
	if (rctx->is_sw_hmac) {
		err = spu_hmac_outer_hash(req, ctx);
		if (err < 0)
			return err;
		flow_dump("  hmac: ", req->result, ctx->digestsize);
	}

	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
	} else {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
	}

	return 0;
}

/**
 * handle_ahash_resp() - Process a SPU response message for a hash request.
 * Checks if the entire crypto API request has been processed, and if so,
 * invokes post processing on the result.
 * @rctx: Crypto request context
 */
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
	struct iproc_ctx_s *ctx = rctx->ctx;
#ifdef DEBUG
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
#endif
	/*
	 * Save hash to use as input to next op if incremental. Might be copying
	 * too much, but that's easier than figuring out the actual digest size
	 * here.
	 */
	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);

	flow_log("%s() blocksize:%u digestsize:%u\n",
		 __func__, blocksize, ctx->digestsize);

	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);

	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
		ahash_req_done(rctx);
}
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun /**
1021*4882a593Smuzhiyun  * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
1022*4882a593Smuzhiyun  * a SPU response message for an AEAD request. Includes buffers to catch SPU
1023*4882a593Smuzhiyun  * message headers and the response data.
1024*4882a593Smuzhiyun  * @mssg:	mailbox message containing the receive sg
1025*4882a593Smuzhiyun  * @rctx:	crypto request context
1026*4882a593Smuzhiyun  * @rx_frag_num: number of scatterlist elements required to hold the
1027*4882a593Smuzhiyun  *		SPU response message
1028*4882a593Smuzhiyun  * @assoc_len:	Length of associated data included in the crypto request
1029*4882a593Smuzhiyun  * @ret_iv_len: Length of IV returned in response
1030*4882a593Smuzhiyun  * @resp_len:	Number of bytes of response data expected to be written to
1031*4882a593Smuzhiyun  *              dst buffer from crypto API
1032*4882a593Smuzhiyun  * @digestsize: Length of hash digest, in bytes
1033*4882a593Smuzhiyun  * @stat_pad_len: Number of bytes required to pad the STAT field to
1034*4882a593Smuzhiyun  *		a 4-byte boundary
1035*4882a593Smuzhiyun  *
1036*4882a593Smuzhiyun  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1037*4882a593Smuzhiyun  * when the request completes, whether the request is handled successfully or
1038*4882a593Smuzhiyun  * there is an error.
1039*4882a593Smuzhiyun  *
1040*4882a593Smuzhiyun  * Returns:
1041*4882a593Smuzhiyun  *   0 if successful
1042*4882a593Smuzhiyun  *   < 0 if an error
1043*4882a593Smuzhiyun  */
1044*4882a593Smuzhiyun static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1045*4882a593Smuzhiyun 				 struct aead_request *req,
1046*4882a593Smuzhiyun 				 struct iproc_reqctx_s *rctx,
1047*4882a593Smuzhiyun 				 u8 rx_frag_num,
1048*4882a593Smuzhiyun 				 unsigned int assoc_len,
1049*4882a593Smuzhiyun 				 u32 ret_iv_len, unsigned int resp_len,
1050*4882a593Smuzhiyun 				 unsigned int digestsize, u32 stat_pad_len)
1051*4882a593Smuzhiyun {
1052*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
1053*4882a593Smuzhiyun 	struct scatterlist *sg;	/* used to build sgs in mbox message */
1054*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = rctx->ctx;
1055*4882a593Smuzhiyun 	u32 datalen;		/* Number of bytes of response data expected */
1056*4882a593Smuzhiyun 	u32 assoc_buf_len;
1057*4882a593Smuzhiyun 	u8 data_padlen = 0;
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	if (ctx->is_rfc4543) {
1060*4882a593Smuzhiyun 		/* RFC4543: only pad after data, not after AAD */
1061*4882a593Smuzhiyun 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1062*4882a593Smuzhiyun 							  assoc_len + resp_len);
1063*4882a593Smuzhiyun 		assoc_buf_len = assoc_len;
1064*4882a593Smuzhiyun 	} else {
1065*4882a593Smuzhiyun 		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1066*4882a593Smuzhiyun 							  resp_len);
1067*4882a593Smuzhiyun 		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1068*4882a593Smuzhiyun 						assoc_len, ret_iv_len,
1069*4882a593Smuzhiyun 						rctx->is_encrypt);
1070*4882a593Smuzhiyun 	}
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	if (ctx->cipher.mode == CIPHER_MODE_CCM)
1073*4882a593Smuzhiyun 		/* ICV (after data) must be in the next 32-bit word for CCM */
1074*4882a593Smuzhiyun 		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1075*4882a593Smuzhiyun 							 resp_len +
1076*4882a593Smuzhiyun 							 data_padlen);
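	/*
	 * Worked example (hypothetical sizes): with assoc_buf_len 20,
	 * resp_len 16 and data_padlen 12, the total of 48 is already 32-bit
	 * aligned, so spu_wordalign_padlen() adds nothing; a total of 50
	 * would get 2 extra bytes so the ICV lands on a word boundary.
	 */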
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	if (data_padlen)
1079*4882a593Smuzhiyun 		/* have to catch gcm pad in separate buffer */
1080*4882a593Smuzhiyun 		rx_frag_num++;
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1083*4882a593Smuzhiyun 				rctx->gfp);
1084*4882a593Smuzhiyun 	if (!mssg->spu.dst)
1085*4882a593Smuzhiyun 		return -ENOMEM;
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	sg = mssg->spu.dst;
1088*4882a593Smuzhiyun 	sg_init_table(sg, rx_frag_num);
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	/* Space for SPU message header */
1091*4882a593Smuzhiyun 	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	if (assoc_buf_len) {
1094*4882a593Smuzhiyun 		/*
1095*4882a593Smuzhiyun 		 * Don't write directly to req->dst, because SPU may pad the
1096*4882a593Smuzhiyun 		 * assoc data in the response
1097*4882a593Smuzhiyun 		 */
1098*4882a593Smuzhiyun 		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1099*4882a593Smuzhiyun 		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1100*4882a593Smuzhiyun 	}
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	if (resp_len) {
1103*4882a593Smuzhiyun 		/*
1104*4882a593Smuzhiyun 		 * Copy in each dst sg entry from request, up to chunksize.
1105*4882a593Smuzhiyun 		 * dst sg catches just the data. digest caught in separate buf.
1106*4882a593Smuzhiyun 		 */
1107*4882a593Smuzhiyun 		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1108*4882a593Smuzhiyun 					 rctx->dst_nents, resp_len);
1109*4882a593Smuzhiyun 		if (datalen < resp_len) {
1110*4882a593Smuzhiyun 			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u\n",
1111*4882a593Smuzhiyun 			       __func__, resp_len, datalen);
1112*4882a593Smuzhiyun 			return -EFAULT;
1113*4882a593Smuzhiyun 		}
1114*4882a593Smuzhiyun 	}
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	/* If GCM/CCM data is padded, catch padding in separate buffer */
1117*4882a593Smuzhiyun 	if (data_padlen) {
1118*4882a593Smuzhiyun 		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1119*4882a593Smuzhiyun 		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1120*4882a593Smuzhiyun 	}
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	/* Always catch ICV in separate buffer */
1123*4882a593Smuzhiyun 	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	flow_log("stat_pad_len %u\n", stat_pad_len);
1126*4882a593Smuzhiyun 	if (stat_pad_len) {
1127*4882a593Smuzhiyun 		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1128*4882a593Smuzhiyun 		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1129*4882a593Smuzhiyun 	}
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1132*4882a593Smuzhiyun 	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	return 0;
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun /**
1138*4882a593Smuzhiyun  * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
1139*4882a593Smuzhiyun  * SPU request message for an AEAD request. Includes SPU message headers and the
1140*4882a593Smuzhiyun  * request data.
1141*4882a593Smuzhiyun  * @mssg:	mailbox message containing the transmit sg
1142*4882a593Smuzhiyun  * @rctx:	crypto request context
1143*4882a593Smuzhiyun  * @tx_frag_num: number of scatterlist elements required to construct the
1144*4882a593Smuzhiyun  *		SPU request message
1145*4882a593Smuzhiyun  * @spu_hdr_len: length of SPU message header in bytes
1146*4882a593Smuzhiyun  * @assoc:	crypto API associated data scatterlist
1147*4882a593Smuzhiyun  * @assoc_len:	length of associated data
1148*4882a593Smuzhiyun  * @assoc_nents: number of scatterlist entries containing assoc data
1149*4882a593Smuzhiyun  * @aead_iv_len: length of AEAD IV, if included
1150*4882a593Smuzhiyun  * @chunksize:	Number of bytes of request data
1151*4882a593Smuzhiyun  * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
1152*4882a593Smuzhiyun  * @pad_len:	Number of pad bytes
1153*4882a593Smuzhiyun  * @incl_icv:	If true, write separate ICV buffer after data and
1154*4882a593Smuzhiyun  *              any padding
1155*4882a593Smuzhiyun  *
1156*4882a593Smuzhiyun  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1157*4882a593Smuzhiyun  * when the request completes, whether the request is handled successfully or
1158*4882a593Smuzhiyun  * there is an error.
1159*4882a593Smuzhiyun  *
1160*4882a593Smuzhiyun  * Return:
1161*4882a593Smuzhiyun  *   0 if successful
1162*4882a593Smuzhiyun  *   < 0 if an error
1163*4882a593Smuzhiyun  */
1164*4882a593Smuzhiyun static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1165*4882a593Smuzhiyun 				 struct iproc_reqctx_s *rctx,
1166*4882a593Smuzhiyun 				 u8 tx_frag_num,
1167*4882a593Smuzhiyun 				 u32 spu_hdr_len,
1168*4882a593Smuzhiyun 				 struct scatterlist *assoc,
1169*4882a593Smuzhiyun 				 unsigned int assoc_len,
1170*4882a593Smuzhiyun 				 int assoc_nents,
1171*4882a593Smuzhiyun 				 unsigned int aead_iv_len,
1172*4882a593Smuzhiyun 				 unsigned int chunksize,
1173*4882a593Smuzhiyun 				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1174*4882a593Smuzhiyun {
1175*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
1176*4882a593Smuzhiyun 	struct scatterlist *sg;	/* used to build sgs in mbox message */
1177*4882a593Smuzhiyun 	struct scatterlist *assoc_sg = assoc;
1178*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = rctx->ctx;
1179*4882a593Smuzhiyun 	u32 datalen;		/* Number of bytes of data to write */
1180*4882a593Smuzhiyun 	u32 written;		/* Number of bytes of data written */
1181*4882a593Smuzhiyun 	u32 assoc_offset = 0;
1182*4882a593Smuzhiyun 	u32 stat_len;
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1185*4882a593Smuzhiyun 				rctx->gfp);
1186*4882a593Smuzhiyun 	if (!mssg->spu.src)
1187*4882a593Smuzhiyun 		return -ENOMEM;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	sg = mssg->spu.src;
1190*4882a593Smuzhiyun 	sg_init_table(sg, tx_frag_num);
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1193*4882a593Smuzhiyun 		   BCM_HDR_LEN + spu_hdr_len);
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	if (assoc_len) {
1196*4882a593Smuzhiyun 		/* Copy in each associated data sg entry from request */
1197*4882a593Smuzhiyun 		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1198*4882a593Smuzhiyun 					 assoc_nents, assoc_len);
1199*4882a593Smuzhiyun 		if (written < assoc_len) {
1200*4882a593Smuzhiyun 			pr_err("%s(): failed to copy assoc sg to mbox msg\n",
1201*4882a593Smuzhiyun 			       __func__);
1202*4882a593Smuzhiyun 			return -EFAULT;
1203*4882a593Smuzhiyun 		}
1204*4882a593Smuzhiyun 	}
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	if (aead_iv_len)
1207*4882a593Smuzhiyun 		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	if (aad_pad_len) {
1210*4882a593Smuzhiyun 		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1211*4882a593Smuzhiyun 		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1212*4882a593Smuzhiyun 	}
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	datalen = chunksize;
1215*4882a593Smuzhiyun 	if ((chunksize > ctx->digestsize) && incl_icv)
1216*4882a593Smuzhiyun 		datalen -= ctx->digestsize;
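	/*
	 * When the ICV rides at the end of the source (incl_icv), it is sent
	 * from the separate digest buffer below, so skip those bytes when
	 * pulling payload from the src scatterlist.
	 */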
1217*4882a593Smuzhiyun 	if (datalen) {
1218*4882a593Smuzhiyun 		/* For aead, a single msg should consume the entire src sg */
1219*4882a593Smuzhiyun 		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1220*4882a593Smuzhiyun 					 rctx->src_nents, datalen);
1221*4882a593Smuzhiyun 		if (written < datalen) {
1222*4882a593Smuzhiyun 			pr_err("%s(): failed to copy src sg to mbox msg\n",
1223*4882a593Smuzhiyun 			       __func__);
1224*4882a593Smuzhiyun 			return -EFAULT;
1225*4882a593Smuzhiyun 		}
1226*4882a593Smuzhiyun 	}
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	if (pad_len) {
1229*4882a593Smuzhiyun 		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1230*4882a593Smuzhiyun 		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1231*4882a593Smuzhiyun 	}
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	if (incl_icv)
1234*4882a593Smuzhiyun 		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	stat_len = spu->spu_tx_status_len();
1237*4882a593Smuzhiyun 	if (stat_len) {
1238*4882a593Smuzhiyun 		memset(rctx->msg_buf.tx_stat, 0, stat_len);
1239*4882a593Smuzhiyun 		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1240*4882a593Smuzhiyun 	}
1241*4882a593Smuzhiyun 	return 0;
1242*4882a593Smuzhiyun }
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun /**
1245*4882a593Smuzhiyun  * handle_aead_req() - Submit a SPU request message for the next chunk of the
1246*4882a593Smuzhiyun  * current AEAD request.
1247*4882a593Smuzhiyun  * @rctx:  Crypto request context
1248*4882a593Smuzhiyun  *
1249*4882a593Smuzhiyun  * Unlike other operation types, we assume the length of the request fits in
1250*4882a593Smuzhiyun  * a single SPU request message. aead_enqueue() makes sure this is true.
1251*4882a593Smuzhiyun  * Comments for other op types regarding threads apply here as well.
1252*4882a593Smuzhiyun  *
1253*4882a593Smuzhiyun  * Unlike incremental hash ops, where the SPU returns the entire hash for
1254*4882a593Smuzhiyun  * truncated algs like SHA-224, the SPU returns just the truncated hash in
1255*4882a593Smuzhiyun  * response to aead requests. So digestsize is always ctx->digestsize here.
1256*4882a593Smuzhiyun  *
1257*4882a593Smuzhiyun  * Return: -EINPROGRESS: crypto request has been accepted and result will be
1258*4882a593Smuzhiyun  *			 returned asynchronously
1259*4882a593Smuzhiyun  *         Any other value indicates an error
1260*4882a593Smuzhiyun  */
1261*4882a593Smuzhiyun static int handle_aead_req(struct iproc_reqctx_s *rctx)
1262*4882a593Smuzhiyun {
1263*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
1264*4882a593Smuzhiyun 	struct crypto_async_request *areq = rctx->parent;
1265*4882a593Smuzhiyun 	struct aead_request *req = container_of(areq,
1266*4882a593Smuzhiyun 						struct aead_request, base);
1267*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = rctx->ctx;
1268*4882a593Smuzhiyun 	int err;
1269*4882a593Smuzhiyun 	unsigned int chunksize;
1270*4882a593Smuzhiyun 	unsigned int resp_len;
1271*4882a593Smuzhiyun 	u32 spu_hdr_len;
1272*4882a593Smuzhiyun 	u32 db_size;
1273*4882a593Smuzhiyun 	u32 stat_pad_len;
1274*4882a593Smuzhiyun 	u32 pad_len;
1275*4882a593Smuzhiyun 	struct brcm_message *mssg;	/* mailbox message */
1276*4882a593Smuzhiyun 	struct spu_request_opts req_opts;
1277*4882a593Smuzhiyun 	struct spu_cipher_parms cipher_parms;
1278*4882a593Smuzhiyun 	struct spu_hash_parms hash_parms;
1279*4882a593Smuzhiyun 	struct spu_aead_parms aead_parms;
1280*4882a593Smuzhiyun 	int assoc_nents = 0;
1281*4882a593Smuzhiyun 	bool incl_icv = false;
1282*4882a593Smuzhiyun 	unsigned int digestsize = ctx->digestsize;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	/* number of entries in src and dst sg. Always includes SPU msg header.
1285*4882a593Smuzhiyun 	 */
1286*4882a593Smuzhiyun 	u8 rx_frag_num = 2;	/* and STATUS */
1287*4882a593Smuzhiyun 	u8 tx_frag_num = 1;
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	/* doing the whole thing at once */
1290*4882a593Smuzhiyun 	chunksize = rctx->total_todo;
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	flow_log("%s: chunksize %u\n", __func__, chunksize);
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	memset(&req_opts, 0, sizeof(req_opts));
1295*4882a593Smuzhiyun 	memset(&hash_parms, 0, sizeof(hash_parms));
1296*4882a593Smuzhiyun 	memset(&aead_parms, 0, sizeof(aead_parms));
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	req_opts.is_inbound = !(rctx->is_encrypt);
1299*4882a593Smuzhiyun 	req_opts.auth_first = ctx->auth_first;
1300*4882a593Smuzhiyun 	req_opts.is_aead = true;
1301*4882a593Smuzhiyun 	req_opts.is_esp = ctx->is_esp;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	cipher_parms.alg = ctx->cipher.alg;
1304*4882a593Smuzhiyun 	cipher_parms.mode = ctx->cipher.mode;
1305*4882a593Smuzhiyun 	cipher_parms.type = ctx->cipher_type;
1306*4882a593Smuzhiyun 	cipher_parms.key_buf = ctx->enckey;
1307*4882a593Smuzhiyun 	cipher_parms.key_len = ctx->enckeylen;
1308*4882a593Smuzhiyun 	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1309*4882a593Smuzhiyun 	cipher_parms.iv_len = rctx->iv_ctr_len;
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	hash_parms.alg = ctx->auth.alg;
1312*4882a593Smuzhiyun 	hash_parms.mode = ctx->auth.mode;
1313*4882a593Smuzhiyun 	hash_parms.type = HASH_TYPE_NONE;
1314*4882a593Smuzhiyun 	hash_parms.key_buf = (u8 *)ctx->authkey;
1315*4882a593Smuzhiyun 	hash_parms.key_len = ctx->authkeylen;
1316*4882a593Smuzhiyun 	hash_parms.digestsize = digestsize;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1319*4882a593Smuzhiyun 	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
1320*4882a593Smuzhiyun 		hash_parms.key_len = SHA224_DIGEST_SIZE;
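	/*
	 * Note on the override above (an assumption, not verified here): the
	 * SPU request builder appears to want SHA-224 HMAC keys padded out to
	 * at least the digest size, so advertise SHA224_DIGEST_SIZE bytes.
	 */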
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	aead_parms.assoc_size = req->assoclen;
1323*4882a593Smuzhiyun 	if (ctx->is_esp && !ctx->is_rfc4543) {
1324*4882a593Smuzhiyun 		/*
1325*4882a593Smuzhiyun 		 * The 8-byte IV is included in the assoc data in the request. SPU2
1326*4882a593Smuzhiyun 		 * expects AAD to include just SPI and seqno. So
1327*4882a593Smuzhiyun 		 * subtract off the IV len.
1328*4882a593Smuzhiyun 		 */
1329*4882a593Smuzhiyun 		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 		if (rctx->is_encrypt) {
1332*4882a593Smuzhiyun 			aead_parms.return_iv = true;
1333*4882a593Smuzhiyun 			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1334*4882a593Smuzhiyun 			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1335*4882a593Smuzhiyun 		}
1336*4882a593Smuzhiyun 	} else {
1337*4882a593Smuzhiyun 		aead_parms.ret_iv_len = 0;
1338*4882a593Smuzhiyun 	}
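	/*
	 * RFC4106 ESP layout (sketch): assoc = SPI || seqno || IV. The 8-byte
	 * IV is counted out of the AAD above; on encrypt, the generated IV is
	 * returned after the 4-byte salt (ret_iv_off == GCM_ESP_SALT_SIZE).
	 */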
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	/*
1341*4882a593Smuzhiyun 	 * Count number of sg entries from the crypto API request that are to
1342*4882a593Smuzhiyun 	 * be included in this mailbox message. For dst sg, don't count space
1343*4882a593Smuzhiyun 	 * for digest. Digest gets caught in a separate buffer and copied back
1344*4882a593Smuzhiyun 	 * to dst sg when processing response.
1345*4882a593Smuzhiyun 	 */
1346*4882a593Smuzhiyun 	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1347*4882a593Smuzhiyun 	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1348*4882a593Smuzhiyun 	if (aead_parms.assoc_size)
1349*4882a593Smuzhiyun 		assoc_nents = spu_sg_count(rctx->assoc, 0,
1350*4882a593Smuzhiyun 					   aead_parms.assoc_size);
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	mssg = &rctx->mb_mssg;
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	rctx->total_sent = chunksize;
1355*4882a593Smuzhiyun 	rctx->src_sent = chunksize;
1356*4882a593Smuzhiyun 	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1357*4882a593Smuzhiyun 				    aead_parms.assoc_size,
1358*4882a593Smuzhiyun 				    aead_parms.ret_iv_len,
1359*4882a593Smuzhiyun 				    rctx->is_encrypt))
1360*4882a593Smuzhiyun 		rx_frag_num++;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1363*4882a593Smuzhiyun 						rctx->iv_ctr_len);
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	if (ctx->auth.alg == HASH_ALG_AES)
1366*4882a593Smuzhiyun 		hash_parms.type = (enum hash_type)ctx->cipher_type;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	/* General case AAD padding (CCM and RFC4543 special cases below) */
1369*4882a593Smuzhiyun 	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1370*4882a593Smuzhiyun 						 aead_parms.assoc_size);
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	/* General case data padding (CCM decrypt special case below) */
1373*4882a593Smuzhiyun 	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1374*4882a593Smuzhiyun 							   chunksize);
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1377*4882a593Smuzhiyun 		/*
1378*4882a593Smuzhiyun 		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
1379*4882a593Smuzhiyun 		 * 128-bit aligned
1380*4882a593Smuzhiyun 		 */
1381*4882a593Smuzhiyun 		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1382*4882a593Smuzhiyun 					 ctx->cipher.mode,
1383*4882a593Smuzhiyun 					 aead_parms.assoc_size + 2);
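		/*
		 * e.g. (hypothetical) assoc_size = 24: with the 2-byte AAD
		 * length field, 26 bytes round up to the next 16-byte
		 * boundary, so aad_pad_len becomes 6.
		 */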
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 		/*
1386*4882a593Smuzhiyun 		 * And when decrypting CCM, need to pad without including
1387*4882a593Smuzhiyun 		 * size of ICV which is tacked on to end of chunk
1388*4882a593Smuzhiyun 		 */
1389*4882a593Smuzhiyun 		if (!rctx->is_encrypt)
1390*4882a593Smuzhiyun 			aead_parms.data_pad_len =
1391*4882a593Smuzhiyun 				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1392*4882a593Smuzhiyun 							chunksize - digestsize);
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 		/* CCM also requires software to rewrite portions of IV: */
1395*4882a593Smuzhiyun 		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1396*4882a593Smuzhiyun 				       chunksize, rctx->is_encrypt,
1397*4882a593Smuzhiyun 				       ctx->is_esp);
1398*4882a593Smuzhiyun 	}
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	if (ctx->is_rfc4543) {
1401*4882a593Smuzhiyun 		/*
1402*4882a593Smuzhiyun 		 * RFC4543: data is included in AAD, so don't pad after AAD
1403*4882a593Smuzhiyun 		 * and pad data based on both AAD + data size
1404*4882a593Smuzhiyun 		 */
1405*4882a593Smuzhiyun 		aead_parms.aad_pad_len = 0;
1406*4882a593Smuzhiyun 		if (!rctx->is_encrypt)
1407*4882a593Smuzhiyun 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1408*4882a593Smuzhiyun 					ctx->cipher.mode,
1409*4882a593Smuzhiyun 					aead_parms.assoc_size + chunksize -
1410*4882a593Smuzhiyun 					digestsize);
1411*4882a593Smuzhiyun 		else
1412*4882a593Smuzhiyun 			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1413*4882a593Smuzhiyun 					ctx->cipher.mode,
1414*4882a593Smuzhiyun 					aead_parms.assoc_size + chunksize);
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 		req_opts.is_rfc4543 = true;
1417*4882a593Smuzhiyun 	}
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1420*4882a593Smuzhiyun 		incl_icv = true;
1421*4882a593Smuzhiyun 		tx_frag_num++;
1422*4882a593Smuzhiyun 		/* Copy ICV from end of src scatterlist to digest buf */
1423*4882a593Smuzhiyun 		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1424*4882a593Smuzhiyun 				    req->assoclen + rctx->total_sent -
1425*4882a593Smuzhiyun 				    digestsize);
1426*4882a593Smuzhiyun 	}
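	/*
	 * Offset math above: the ICV occupies the last digestsize bytes of
	 * src; rctx->total_sent was set to chunksize earlier, so the copy
	 * starts exactly at the ICV.
	 */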
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	atomic64_add(chunksize, &iproc_priv.bytes_out);
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	/* Prepend SPU header with type 3 BCM header */
1433*4882a593Smuzhiyun 	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1436*4882a593Smuzhiyun 					      BCM_HDR_LEN, &req_opts,
1437*4882a593Smuzhiyun 					      &cipher_parms, &hash_parms,
1438*4882a593Smuzhiyun 					      &aead_parms, chunksize);
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	/* Determine total length of padding. Put all padding in one buffer. */
1441*4882a593Smuzhiyun 	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1442*4882a593Smuzhiyun 				   chunksize, aead_parms.aad_pad_len,
1443*4882a593Smuzhiyun 				   aead_parms.data_pad_len, 0);
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 	stat_pad_len = spu->spu_wordalign_padlen(db_size);
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	if (stat_pad_len)
1448*4882a593Smuzhiyun 		rx_frag_num++;
1449*4882a593Smuzhiyun 	pad_len = aead_parms.data_pad_len + stat_pad_len;
1450*4882a593Smuzhiyun 	if (pad_len) {
1451*4882a593Smuzhiyun 		tx_frag_num++;
1452*4882a593Smuzhiyun 		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1453*4882a593Smuzhiyun 				     aead_parms.data_pad_len, 0,
1454*4882a593Smuzhiyun 				     ctx->auth.alg, ctx->auth.mode,
1455*4882a593Smuzhiyun 				     rctx->total_sent, stat_pad_len);
1456*4882a593Smuzhiyun 	}
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1459*4882a593Smuzhiyun 			      spu_hdr_len);
1460*4882a593Smuzhiyun 	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1461*4882a593Smuzhiyun 	packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1462*4882a593Smuzhiyun 	packet_log("BD:\n");
1463*4882a593Smuzhiyun 	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1464*4882a593Smuzhiyun 	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	/*
1467*4882a593Smuzhiyun 	 * Build mailbox message containing SPU request msg and rx buffers
1468*4882a593Smuzhiyun 	 * to catch response message
1469*4882a593Smuzhiyun 	 */
1470*4882a593Smuzhiyun 	memset(mssg, 0, sizeof(*mssg));
1471*4882a593Smuzhiyun 	mssg->type = BRCM_MESSAGE_SPU;
1472*4882a593Smuzhiyun 	mssg->ctx = rctx;	/* Will be returned in response */
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	/* Create rx scatterlist to catch result */
1475*4882a593Smuzhiyun 	rx_frag_num += rctx->dst_nents;
1476*4882a593Smuzhiyun 	resp_len = chunksize;
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	/*
1479*4882a593Smuzhiyun 	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
1480*4882a593Smuzhiyun 	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
1481*4882a593Smuzhiyun 	 * sends entire digest back.
1482*4882a593Smuzhiyun 	 */
1483*4882a593Smuzhiyun 	rx_frag_num++;
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1486*4882a593Smuzhiyun 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1487*4882a593Smuzhiyun 		/*
1488*4882a593Smuzhiyun 		 * Input is ciphertext plus ICV, but the ICV is not
1489*4882a593Smuzhiyun 		 * included in the output.
1490*4882a593Smuzhiyun 		 */
1491*4882a593Smuzhiyun 		resp_len -= ctx->digestsize;
1492*4882a593Smuzhiyun 		if (resp_len == 0)
1493*4882a593Smuzhiyun 			/* no rx frags to catch output data */
1494*4882a593Smuzhiyun 			rx_frag_num -= rctx->dst_nents;
1495*4882a593Smuzhiyun 	}
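	/*
	 * Rx buffer layout being requested (unused pieces are skipped),
	 * mirroring the sg_set_buf() order in spu_aead_rx_sg_create():
	 *   [resp hdr][assoc][data][gcm/ccm pad][digest][stat pad][status]
	 */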
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1498*4882a593Smuzhiyun 				    aead_parms.assoc_size,
1499*4882a593Smuzhiyun 				    aead_parms.ret_iv_len, resp_len, digestsize,
1500*4882a593Smuzhiyun 				    stat_pad_len);
1501*4882a593Smuzhiyun 	if (err)
1502*4882a593Smuzhiyun 		return err;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 	/* Create tx scatterlist containing SPU request message */
1505*4882a593Smuzhiyun 	tx_frag_num += rctx->src_nents;
1506*4882a593Smuzhiyun 	tx_frag_num += assoc_nents;
1507*4882a593Smuzhiyun 	if (aead_parms.aad_pad_len)
1508*4882a593Smuzhiyun 		tx_frag_num++;
1509*4882a593Smuzhiyun 	if (aead_parms.iv_len)
1510*4882a593Smuzhiyun 		tx_frag_num++;
1511*4882a593Smuzhiyun 	if (spu->spu_tx_status_len())
1512*4882a593Smuzhiyun 		tx_frag_num++;
1513*4882a593Smuzhiyun 	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1514*4882a593Smuzhiyun 				    rctx->assoc, aead_parms.assoc_size,
1515*4882a593Smuzhiyun 				    assoc_nents, aead_parms.iv_len, chunksize,
1516*4882a593Smuzhiyun 				    aead_parms.aad_pad_len, pad_len, incl_icv);
1517*4882a593Smuzhiyun 	if (err)
1518*4882a593Smuzhiyun 		return err;
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1521*4882a593Smuzhiyun 	if (unlikely(err < 0))
1522*4882a593Smuzhiyun 		return err;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	return -EINPROGRESS;
1525*4882a593Smuzhiyun }
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun /**
1528*4882a593Smuzhiyun  * handle_aead_resp() - Process a SPU response message for an AEAD request.
1529*4882a593Smuzhiyun  * @rctx:  Crypto request context
1530*4882a593Smuzhiyun  */
1531*4882a593Smuzhiyun static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1532*4882a593Smuzhiyun {
1533*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
1534*4882a593Smuzhiyun 	struct crypto_async_request *areq = rctx->parent;
1535*4882a593Smuzhiyun 	struct aead_request *req = container_of(areq,
1536*4882a593Smuzhiyun 						struct aead_request, base);
1537*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = rctx->ctx;
1538*4882a593Smuzhiyun 	u32 payload_len;
1539*4882a593Smuzhiyun 	unsigned int icv_offset;
1540*4882a593Smuzhiyun 	u32 result_len;
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	/* See how much data was returned */
1543*4882a593Smuzhiyun 	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1544*4882a593Smuzhiyun 	flow_log("payload_len %u\n", payload_len);
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	/* only count payload */
1547*4882a593Smuzhiyun 	atomic64_add(payload_len, &iproc_priv.bytes_in);
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	if (req->assoclen)
1550*4882a593Smuzhiyun 		packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
1551*4882a593Smuzhiyun 			    req->assoclen);
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	/*
1554*4882a593Smuzhiyun 	 * Copy the ICV back to the destination
1555*4882a593Smuzhiyun 	 * buffer. In the decrypt case, the SPU gives us back the digest, but
1556*4882a593Smuzhiyun 	 * the crypto API doesn't expect the ICV in the dst buffer.
1557*4882a593Smuzhiyun 	 */
1558*4882a593Smuzhiyun 	result_len = req->cryptlen;
1559*4882a593Smuzhiyun 	if (rctx->is_encrypt) {
1560*4882a593Smuzhiyun 		icv_offset = req->assoclen + rctx->total_sent;
1561*4882a593Smuzhiyun 		packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1562*4882a593Smuzhiyun 		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1563*4882a593Smuzhiyun 		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1564*4882a593Smuzhiyun 				      ctx->digestsize, icv_offset);
1565*4882a593Smuzhiyun 		result_len += ctx->digestsize;
1566*4882a593Smuzhiyun 	}
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 	packet_log("response data:  ");
1569*4882a593Smuzhiyun 	dump_sg(req->dst, req->assoclen, result_len);
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1572*4882a593Smuzhiyun 	if (ctx->cipher.alg == CIPHER_ALG_AES) {
1573*4882a593Smuzhiyun 		if (ctx->cipher.mode == CIPHER_MODE_CCM)
1574*4882a593Smuzhiyun 			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1575*4882a593Smuzhiyun 		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1576*4882a593Smuzhiyun 			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1577*4882a593Smuzhiyun 		else
1578*4882a593Smuzhiyun 			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1579*4882a593Smuzhiyun 	} else {
1580*4882a593Smuzhiyun 		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1581*4882a593Smuzhiyun 	}
1582*4882a593Smuzhiyun }
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun /**
1585*4882a593Smuzhiyun  * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
1586*4882a593Smuzhiyun  * @rctx:  request context
1587*4882a593Smuzhiyun  *
1588*4882a593Smuzhiyun  * Mailbox scatterlists are allocated for each chunk. So free them after
1589*4882a593Smuzhiyun  * processing each chunk.
1590*4882a593Smuzhiyun  */
1591*4882a593Smuzhiyun static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1592*4882a593Smuzhiyun {
1593*4882a593Smuzhiyun 	/* mailbox message used to tx request */
1594*4882a593Smuzhiyun 	struct brcm_message *mssg = &rctx->mb_mssg;
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	kfree(mssg->spu.src);
1597*4882a593Smuzhiyun 	kfree(mssg->spu.dst);
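	/*
	 * Zero the message so a repeat cleanup (e.g. from finish_req()) sees
	 * NULL pointers; kfree(NULL) is a no-op, which keeps this idempotent.
	 */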
1598*4882a593Smuzhiyun 	memset(mssg, 0, sizeof(struct brcm_message));
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun /**
1602*4882a593Smuzhiyun  * finish_req() - Used to invoke the complete callback from the requester when
1603*4882a593Smuzhiyun  * a request has been handled asynchronously.
1604*4882a593Smuzhiyun  * @rctx:  Request context
1605*4882a593Smuzhiyun  * @err:   Indicates whether the request was successful or not
1606*4882a593Smuzhiyun  *
1607*4882a593Smuzhiyun  * Ensures that cleanup has been done for request
1608*4882a593Smuzhiyun  */
1609*4882a593Smuzhiyun static void finish_req(struct iproc_reqctx_s *rctx, int err)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun 	struct crypto_async_request *areq = rctx->parent;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	flow_log("%s() err:%d\n\n", __func__, err);
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	/* No harm done if already called */
1616*4882a593Smuzhiyun 	spu_chunk_cleanup(rctx);
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	if (areq)
1619*4882a593Smuzhiyun 		areq->complete(areq, err);
1620*4882a593Smuzhiyun }
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun /**
1623*4882a593Smuzhiyun  * spu_rx_callback() - Callback from mailbox framework with a SPU response.
1624*4882a593Smuzhiyun  * @cl:		mailbox client structure for SPU driver
1625*4882a593Smuzhiyun  * @msg:	mailbox message containing SPU response
1626*4882a593Smuzhiyun  */
1627*4882a593Smuzhiyun static void spu_rx_callback(struct mbox_client *cl, void *msg)
1628*4882a593Smuzhiyun {
1629*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
1630*4882a593Smuzhiyun 	struct brcm_message *mssg = msg;
1631*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx;
1632*4882a593Smuzhiyun 	int err;
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 	rctx = mssg->ctx;
1635*4882a593Smuzhiyun 	if (unlikely(!rctx)) {
1636*4882a593Smuzhiyun 		/* This is fatal */
1637*4882a593Smuzhiyun 		pr_err("%s(): no request context\n", __func__);
1638*4882a593Smuzhiyun 		err = -EFAULT;
1639*4882a593Smuzhiyun 		goto cb_finish;
1640*4882a593Smuzhiyun 	}
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 	/* process the SPU status */
1643*4882a593Smuzhiyun 	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1644*4882a593Smuzhiyun 	if (err != 0) {
1645*4882a593Smuzhiyun 		if (err == SPU_INVALID_ICV)
1646*4882a593Smuzhiyun 			atomic_inc(&iproc_priv.bad_icv);
1647*4882a593Smuzhiyun 		err = -EBADMSG;
1648*4882a593Smuzhiyun 		goto cb_finish;
1649*4882a593Smuzhiyun 	}
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	/* Process the SPU response message */
1652*4882a593Smuzhiyun 	switch (rctx->ctx->alg->type) {
1653*4882a593Smuzhiyun 	case CRYPTO_ALG_TYPE_SKCIPHER:
1654*4882a593Smuzhiyun 		handle_skcipher_resp(rctx);
1655*4882a593Smuzhiyun 		break;
1656*4882a593Smuzhiyun 	case CRYPTO_ALG_TYPE_AHASH:
1657*4882a593Smuzhiyun 		handle_ahash_resp(rctx);
1658*4882a593Smuzhiyun 		break;
1659*4882a593Smuzhiyun 	case CRYPTO_ALG_TYPE_AEAD:
1660*4882a593Smuzhiyun 		handle_aead_resp(rctx);
1661*4882a593Smuzhiyun 		break;
1662*4882a593Smuzhiyun 	default:
1663*4882a593Smuzhiyun 		err = -EINVAL;
1664*4882a593Smuzhiyun 		goto cb_finish;
1665*4882a593Smuzhiyun 	}
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	/*
1668*4882a593Smuzhiyun 	 * If this response does not complete the request, then send the next
1669*4882a593Smuzhiyun 	 * request chunk.
1670*4882a593Smuzhiyun 	 */
1671*4882a593Smuzhiyun 	if (rctx->total_sent < rctx->total_todo) {
1672*4882a593Smuzhiyun 		/* Deallocate anything specific to previous chunk */
1673*4882a593Smuzhiyun 		spu_chunk_cleanup(rctx);
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 		switch (rctx->ctx->alg->type) {
1676*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_SKCIPHER:
1677*4882a593Smuzhiyun 			err = handle_skcipher_req(rctx);
1678*4882a593Smuzhiyun 			break;
1679*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_AHASH:
1680*4882a593Smuzhiyun 			err = handle_ahash_req(rctx);
1681*4882a593Smuzhiyun 			if (err == -EAGAIN)
1682*4882a593Smuzhiyun 				/*
1683*4882a593Smuzhiyun 				 * we saved data in hash carry, but tell crypto
1684*4882a593Smuzhiyun 				 * API we successfully completed request.
1685*4882a593Smuzhiyun 				 */
1686*4882a593Smuzhiyun 				err = 0;
1687*4882a593Smuzhiyun 			break;
1688*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_AEAD:
1689*4882a593Smuzhiyun 			err = handle_aead_req(rctx);
1690*4882a593Smuzhiyun 			break;
1691*4882a593Smuzhiyun 		default:
1692*4882a593Smuzhiyun 			err = -EINVAL;
1693*4882a593Smuzhiyun 		}
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 		if (err == -EINPROGRESS)
1696*4882a593Smuzhiyun 			/* Successfully submitted request for next chunk */
1697*4882a593Smuzhiyun 			return;
1698*4882a593Smuzhiyun 	}
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun cb_finish:
1701*4882a593Smuzhiyun 	finish_req(rctx, err);
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun /* ==================== Kernel Cryptographic API ==================== */
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun /**
1707*4882a593Smuzhiyun  * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
1708*4882a593Smuzhiyun  * @req:	Crypto API request
1709*4882a593Smuzhiyun  * @encrypt:	true if encrypting; false if decrypting
1710*4882a593Smuzhiyun  *
1711*4882a593Smuzhiyun  * Return: -EINPROGRESS if request accepted and result will be returned
1712*4882a593Smuzhiyun  *			asynchronously
1713*4882a593Smuzhiyun  *	   < 0 if an error
1714*4882a593Smuzhiyun  */
1715*4882a593Smuzhiyun static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
1716*4882a593Smuzhiyun {
1717*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
1718*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx =
1719*4882a593Smuzhiyun 	    crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1720*4882a593Smuzhiyun 	int err;
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	flow_log("%s() enc:%u\n", __func__, encrypt);
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1725*4882a593Smuzhiyun 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1726*4882a593Smuzhiyun 	rctx->parent = &req->base;
1727*4882a593Smuzhiyun 	rctx->is_encrypt = encrypt;
1728*4882a593Smuzhiyun 	rctx->bd_suppress = false;
1729*4882a593Smuzhiyun 	rctx->total_todo = req->cryptlen;
1730*4882a593Smuzhiyun 	rctx->src_sent = 0;
1731*4882a593Smuzhiyun 	rctx->total_sent = 0;
1732*4882a593Smuzhiyun 	rctx->total_received = 0;
1733*4882a593Smuzhiyun 	rctx->ctx = ctx;
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun 	/* Initialize current position in src and dst scatterlists */
1736*4882a593Smuzhiyun 	rctx->src_sg = req->src;
1737*4882a593Smuzhiyun 	rctx->src_nents = 0;
1738*4882a593Smuzhiyun 	rctx->src_skip = 0;
1739*4882a593Smuzhiyun 	rctx->dst_sg = req->dst;
1740*4882a593Smuzhiyun 	rctx->dst_nents = 0;
1741*4882a593Smuzhiyun 	rctx->dst_skip = 0;
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1744*4882a593Smuzhiyun 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
1745*4882a593Smuzhiyun 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
1746*4882a593Smuzhiyun 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
1747*4882a593Smuzhiyun 	    ctx->cipher.mode == CIPHER_MODE_GCM ||
1748*4882a593Smuzhiyun 	    ctx->cipher.mode == CIPHER_MODE_CCM) {
1749*4882a593Smuzhiyun 		rctx->iv_ctr_len =
1750*4882a593Smuzhiyun 		    crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
1751*4882a593Smuzhiyun 		memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
1752*4882a593Smuzhiyun 	} else {
1753*4882a593Smuzhiyun 		rctx->iv_ctr_len = 0;
1754*4882a593Smuzhiyun 	}
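	/* Remaining modes (e.g. ECB) carry no IV in the request */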
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 	/* Choose a SPU to process this request */
1757*4882a593Smuzhiyun 	rctx->chan_idx = select_channel();
1758*4882a593Smuzhiyun 	err = handle_skcipher_req(rctx);
1759*4882a593Smuzhiyun 	if (err != -EINPROGRESS)
1760*4882a593Smuzhiyun 		/* synchronous result */
1761*4882a593Smuzhiyun 		spu_chunk_cleanup(rctx);
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	return err;
1764*4882a593Smuzhiyun }
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
1767*4882a593Smuzhiyun 		      unsigned int keylen)
1768*4882a593Smuzhiyun {
1769*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1770*4882a593Smuzhiyun 	int err;
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	err = verify_skcipher_des_key(cipher, key);
1773*4882a593Smuzhiyun 	if (err)
1774*4882a593Smuzhiyun 		return err;
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 	ctx->cipher_type = CIPHER_TYPE_DES;
1777*4882a593Smuzhiyun 	return 0;
1778*4882a593Smuzhiyun }
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
1781*4882a593Smuzhiyun 			   unsigned int keylen)
1782*4882a593Smuzhiyun {
1783*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1784*4882a593Smuzhiyun 	int err;
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	err = verify_skcipher_des3_key(cipher, key);
1787*4882a593Smuzhiyun 	if (err)
1788*4882a593Smuzhiyun 		return err;
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 	ctx->cipher_type = CIPHER_TYPE_3DES;
1791*4882a593Smuzhiyun 	return 0;
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
1795*4882a593Smuzhiyun 		      unsigned int keylen)
1796*4882a593Smuzhiyun {
1797*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun 	if (ctx->cipher.mode == CIPHER_MODE_XTS)
1800*4882a593Smuzhiyun 		/* XTS includes two keys of equal length */
1801*4882a593Smuzhiyun 		keylen = keylen / 2;
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	switch (keylen) {
1804*4882a593Smuzhiyun 	case AES_KEYSIZE_128:
1805*4882a593Smuzhiyun 		ctx->cipher_type = CIPHER_TYPE_AES128;
1806*4882a593Smuzhiyun 		break;
1807*4882a593Smuzhiyun 	case AES_KEYSIZE_192:
1808*4882a593Smuzhiyun 		ctx->cipher_type = CIPHER_TYPE_AES192;
1809*4882a593Smuzhiyun 		break;
1810*4882a593Smuzhiyun 	case AES_KEYSIZE_256:
1811*4882a593Smuzhiyun 		ctx->cipher_type = CIPHER_TYPE_AES256;
1812*4882a593Smuzhiyun 		break;
1813*4882a593Smuzhiyun 	default:
1814*4882a593Smuzhiyun 		return -EINVAL;
1815*4882a593Smuzhiyun 	}
1816*4882a593Smuzhiyun 	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1817*4882a593Smuzhiyun 		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1818*4882a593Smuzhiyun 	return 0;
1819*4882a593Smuzhiyun }
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
1822*4882a593Smuzhiyun 			     unsigned int keylen)
1823*4882a593Smuzhiyun {
1824*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
1825*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1826*4882a593Smuzhiyun 	struct spu_cipher_parms cipher_parms;
1827*4882a593Smuzhiyun 	u32 alloc_len = 0;
1828*4882a593Smuzhiyun 	int err;
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	flow_log("skcipher_setkey() keylen: %d\n", keylen);
1831*4882a593Smuzhiyun 	flow_dump("  key: ", key, keylen);
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	switch (ctx->cipher.alg) {
1834*4882a593Smuzhiyun 	case CIPHER_ALG_DES:
1835*4882a593Smuzhiyun 		err = des_setkey(cipher, key, keylen);
1836*4882a593Smuzhiyun 		break;
1837*4882a593Smuzhiyun 	case CIPHER_ALG_3DES:
1838*4882a593Smuzhiyun 		err = threedes_setkey(cipher, key, keylen);
1839*4882a593Smuzhiyun 		break;
1840*4882a593Smuzhiyun 	case CIPHER_ALG_AES:
1841*4882a593Smuzhiyun 		err = aes_setkey(cipher, key, keylen);
1842*4882a593Smuzhiyun 		break;
1843*4882a593Smuzhiyun 	default:
1844*4882a593Smuzhiyun 		pr_err("%s() Error: unknown cipher alg\n", __func__);
1845*4882a593Smuzhiyun 		err = -EINVAL;
1846*4882a593Smuzhiyun 	}
1847*4882a593Smuzhiyun 	if (err)
1848*4882a593Smuzhiyun 		return err;
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	memcpy(ctx->enckey, key, keylen);
1851*4882a593Smuzhiyun 	ctx->enckeylen = keylen;
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	/* SPU needs XTS keys in the reverse order the crypto API presents */
1854*4882a593Smuzhiyun 	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1855*4882a593Smuzhiyun 	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1856*4882a593Smuzhiyun 		unsigned int xts_keylen = keylen / 2;
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1859*4882a593Smuzhiyun 		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1860*4882a593Smuzhiyun 	}
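	/*
	 * e.g. a 64-byte XTS key arriving from the crypto API as K1 || K2 is
	 * stored in ctx->enckey as K2 || K1.
	 */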
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	if (spu->spu_type == SPU_TYPE_SPUM)
1863*4882a593Smuzhiyun 		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1864*4882a593Smuzhiyun 	else if (spu->spu_type == SPU_TYPE_SPU2)
1865*4882a593Smuzhiyun 		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1866*4882a593Smuzhiyun 	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1867*4882a593Smuzhiyun 	cipher_parms.iv_buf = NULL;
1868*4882a593Smuzhiyun 	cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
1869*4882a593Smuzhiyun 	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 	cipher_parms.alg = ctx->cipher.alg;
1872*4882a593Smuzhiyun 	cipher_parms.mode = ctx->cipher.mode;
1873*4882a593Smuzhiyun 	cipher_parms.type = ctx->cipher_type;
1874*4882a593Smuzhiyun 	cipher_parms.key_buf = ctx->enckey;
1875*4882a593Smuzhiyun 	cipher_parms.key_len = ctx->enckeylen;
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	/* Prepend SPU request message with BCM header */
1878*4882a593Smuzhiyun 	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1879*4882a593Smuzhiyun 	ctx->spu_req_hdr_len =
1880*4882a593Smuzhiyun 	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1881*4882a593Smuzhiyun 				     &cipher_parms);
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1884*4882a593Smuzhiyun 							  ctx->enckeylen,
1885*4882a593Smuzhiyun 							  false);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun 	return 0;
1890*4882a593Smuzhiyun }
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun static int skcipher_encrypt(struct skcipher_request *req)
1893*4882a593Smuzhiyun {
1894*4882a593Smuzhiyun 	flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 	return skcipher_enqueue(req, true);
1897*4882a593Smuzhiyun }
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun static int skcipher_decrypt(struct skcipher_request *req)
1900*4882a593Smuzhiyun {
1901*4882a593Smuzhiyun 	flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
1902*4882a593Smuzhiyun 	return skcipher_enqueue(req, false);
1903*4882a593Smuzhiyun }
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun static int ahash_enqueue(struct ahash_request *req)
1906*4882a593Smuzhiyun {
1907*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1908*4882a593Smuzhiyun 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1909*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1910*4882a593Smuzhiyun 	int err;
1911*4882a593Smuzhiyun 	const char *alg_name;
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1916*4882a593Smuzhiyun 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1917*4882a593Smuzhiyun 	rctx->parent = &req->base;
1918*4882a593Smuzhiyun 	rctx->ctx = ctx;
1919*4882a593Smuzhiyun 	rctx->bd_suppress = true;
1920*4882a593Smuzhiyun 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 	/* Initialize position in src scatterlist */
1923*4882a593Smuzhiyun 	rctx->src_sg = req->src;
1924*4882a593Smuzhiyun 	rctx->src_skip = 0;
1925*4882a593Smuzhiyun 	rctx->src_nents = 0;
1926*4882a593Smuzhiyun 	rctx->dst_sg = NULL;
1927*4882a593Smuzhiyun 	rctx->dst_skip = 0;
1928*4882a593Smuzhiyun 	rctx->dst_nents = 0;
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 	/* SPU2 hardware does not compute hash of zero length data */
1931*4882a593Smuzhiyun 	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
1932*4882a593Smuzhiyun 	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
1933*4882a593Smuzhiyun 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
1934*4882a593Smuzhiyun 		flow_log("Doing %sfinal %s zero-len hash request in software\n",
1935*4882a593Smuzhiyun 			 rctx->is_final ? "" : "non-", alg_name);
1936*4882a593Smuzhiyun 		err = do_shash((unsigned char *)alg_name, req->result,
1937*4882a593Smuzhiyun 			       NULL, 0, NULL, 0, ctx->authkey,
1938*4882a593Smuzhiyun 			       ctx->authkeylen);
1939*4882a593Smuzhiyun 		if (err < 0)
1940*4882a593Smuzhiyun 			flow_log("Hash request failed with error %d\n", err);
1941*4882a593Smuzhiyun 		return err;
1942*4882a593Smuzhiyun 	}
1943*4882a593Smuzhiyun 	/* Choose a SPU to process this request */
1944*4882a593Smuzhiyun 	rctx->chan_idx = select_channel();
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 	err = handle_ahash_req(rctx);
1947*4882a593Smuzhiyun 	if (err != -EINPROGRESS)
1948*4882a593Smuzhiyun 		/* synchronous result */
1949*4882a593Smuzhiyun 		spu_chunk_cleanup(rctx);
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun 	if (err == -EAGAIN)
1952*4882a593Smuzhiyun 		/*
1953*4882a593Smuzhiyun 		 * we saved data in hash carry, but tell crypto API
1954*4882a593Smuzhiyun 		 * we successfully completed request.
1955*4882a593Smuzhiyun 		 */
1956*4882a593Smuzhiyun 		err = 0;
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	return err;
1959*4882a593Smuzhiyun }
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun static int __ahash_init(struct ahash_request *req)
1962*4882a593Smuzhiyun {
1963*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
1964*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1965*4882a593Smuzhiyun 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1966*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun 	flow_log("%s()\n", __func__);
1969*4882a593Smuzhiyun 
1970*4882a593Smuzhiyun 	/* Initialize the context */
1971*4882a593Smuzhiyun 	rctx->hash_carry_len = 0;
1972*4882a593Smuzhiyun 	rctx->is_final = 0;
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun 	rctx->total_todo = 0;
1975*4882a593Smuzhiyun 	rctx->src_sent = 0;
1976*4882a593Smuzhiyun 	rctx->total_sent = 0;
1977*4882a593Smuzhiyun 	rctx->total_received = 0;
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 	ctx->digestsize = crypto_ahash_digestsize(tfm);
1980*4882a593Smuzhiyun 	/* If we add a hash whose digest is larger, catch it here. */
1981*4882a593Smuzhiyun 	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	rctx->is_sw_hmac = false;
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
1986*4882a593Smuzhiyun 							  true);
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	return 0;
1989*4882a593Smuzhiyun }
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun /**
1992*4882a593Smuzhiyun  * spu_no_incr_hash() - Determine whether incremental hashing is supported.
1993*4882a593Smuzhiyun  * @ctx:  Crypto session context
1994*4882a593Smuzhiyun  *
1995*4882a593Smuzhiyun  * SPU-2 does not support incremental hashing (we'll have to revisit and
1996*4882a593Smuzhiyun  * condition based on chip revision or device tree entry if future versions do
1997*4882a593Smuzhiyun  * support incremental hash)
1998*4882a593Smuzhiyun  *
1999*4882a593Smuzhiyun  * SPU-M also doesn't support incremental hashing of AES-XCBC
2000*4882a593Smuzhiyun  *
2001*4882a593Smuzhiyun  * Return: true if incremental hashing is not supported
2002*4882a593Smuzhiyun  *         false otherwise
2003*4882a593Smuzhiyun  */
2004*4882a593Smuzhiyun static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2005*4882a593Smuzhiyun {
2006*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 	if (spu->spu_type == SPU_TYPE_SPU2)
2009*4882a593Smuzhiyun 		return true;
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 	if ((ctx->auth.alg == HASH_ALG_AES) &&
2012*4882a593Smuzhiyun 	    (ctx->auth.mode == HASH_MODE_XCBC))
2013*4882a593Smuzhiyun 		return true;
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 	/* Otherwise, incremental hashing is supported */
2016*4882a593Smuzhiyun 	return false;
2017*4882a593Smuzhiyun }
2018*4882a593Smuzhiyun 
2019*4882a593Smuzhiyun static int ahash_init(struct ahash_request *req)
2020*4882a593Smuzhiyun {
2021*4882a593Smuzhiyun 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2022*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2023*4882a593Smuzhiyun 	const char *alg_name;
2024*4882a593Smuzhiyun 	struct crypto_shash *hash;
2025*4882a593Smuzhiyun 	int ret;
2026*4882a593Smuzhiyun 	gfp_t gfp;
2027*4882a593Smuzhiyun 
2028*4882a593Smuzhiyun 	if (spu_no_incr_hash(ctx)) {
2029*4882a593Smuzhiyun 		/*
2030*4882a593Smuzhiyun 		 * If we get an incremental hashing request and it's not
2031*4882a593Smuzhiyun 		 * supported by the hardware, we need to handle it in software
2032*4882a593Smuzhiyun 		 * by calling synchronous hash functions.
2033*4882a593Smuzhiyun 		 */
2034*4882a593Smuzhiyun 		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2035*4882a593Smuzhiyun 		hash = crypto_alloc_shash(alg_name, 0, 0);
2036*4882a593Smuzhiyun 		if (IS_ERR(hash)) {
2037*4882a593Smuzhiyun 			ret = PTR_ERR(hash);
2038*4882a593Smuzhiyun 			goto err;
2039*4882a593Smuzhiyun 		}
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2042*4882a593Smuzhiyun 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2043*4882a593Smuzhiyun 		ctx->shash = kmalloc(sizeof(*ctx->shash) +
2044*4882a593Smuzhiyun 				     crypto_shash_descsize(hash), gfp);
2045*4882a593Smuzhiyun 		if (!ctx->shash) {
2046*4882a593Smuzhiyun 			ret = -ENOMEM;
2047*4882a593Smuzhiyun 			goto err_hash;
2048*4882a593Smuzhiyun 		}
2049*4882a593Smuzhiyun 		ctx->shash->tfm = hash;
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun 		/* Set the key using data we already have from setkey */
2052*4882a593Smuzhiyun 		if (ctx->authkeylen > 0) {
2053*4882a593Smuzhiyun 			ret = crypto_shash_setkey(hash, ctx->authkey,
2054*4882a593Smuzhiyun 						  ctx->authkeylen);
2055*4882a593Smuzhiyun 			if (ret)
2056*4882a593Smuzhiyun 				goto err_shash;
2057*4882a593Smuzhiyun 		}
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 		/* Initialize hash w/ this key and other params */
2060*4882a593Smuzhiyun 		ret = crypto_shash_init(ctx->shash);
2061*4882a593Smuzhiyun 		if (ret)
2062*4882a593Smuzhiyun 			goto err_shash;
2063*4882a593Smuzhiyun 	} else {
2064*4882a593Smuzhiyun 		/* Otherwise call the internal function which uses SPU hw */
2065*4882a593Smuzhiyun 		ret = __ahash_init(req);
2066*4882a593Smuzhiyun 	}
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun 	return ret;
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun err_shash:
2071*4882a593Smuzhiyun 	kfree(ctx->shash);
2072*4882a593Smuzhiyun err_hash:
2073*4882a593Smuzhiyun 	crypto_free_shash(hash);
2074*4882a593Smuzhiyun err:
2075*4882a593Smuzhiyun 	return ret;
2076*4882a593Smuzhiyun }
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun static int __ahash_update(struct ahash_request *req)
2079*4882a593Smuzhiyun {
2080*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2081*4882a593Smuzhiyun 
2082*4882a593Smuzhiyun 	flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 	if (!req->nbytes)
2085*4882a593Smuzhiyun 		return 0;
2086*4882a593Smuzhiyun 	rctx->total_todo += req->nbytes;
2087*4882a593Smuzhiyun 	rctx->src_sent = 0;
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	return ahash_enqueue(req);
2090*4882a593Smuzhiyun }
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun static int ahash_update(struct ahash_request *req)
2093*4882a593Smuzhiyun {
2094*4882a593Smuzhiyun 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2095*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2096*4882a593Smuzhiyun 	u8 *tmpbuf;
2097*4882a593Smuzhiyun 	int ret;
2098*4882a593Smuzhiyun 	int nents;
2099*4882a593Smuzhiyun 	gfp_t gfp;
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 	if (spu_no_incr_hash(ctx)) {
2102*4882a593Smuzhiyun 		/*
2103*4882a593Smuzhiyun 		 * If we get an incremental hashing request and it's not
2104*4882a593Smuzhiyun 		 * supported by the hardware, we need to handle it in software
2105*4882a593Smuzhiyun 		 * by calling synchronous hash functions.
2106*4882a593Smuzhiyun 		 */
2107*4882a593Smuzhiyun 		if (req->src)
2108*4882a593Smuzhiyun 			nents = sg_nents(req->src);
2109*4882a593Smuzhiyun 		else
2110*4882a593Smuzhiyun 			return -EINVAL;
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 		/* Copy data from req scatterlist to tmp buffer */
2113*4882a593Smuzhiyun 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2114*4882a593Smuzhiyun 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2115*4882a593Smuzhiyun 		tmpbuf = kmalloc(req->nbytes, gfp);
2116*4882a593Smuzhiyun 		if (!tmpbuf)
2117*4882a593Smuzhiyun 			return -ENOMEM;
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2120*4882a593Smuzhiyun 				req->nbytes) {
2121*4882a593Smuzhiyun 			kfree(tmpbuf);
2122*4882a593Smuzhiyun 			return -EINVAL;
2123*4882a593Smuzhiyun 		}
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 		/* Call synchronous update */
2126*4882a593Smuzhiyun 		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2127*4882a593Smuzhiyun 		kfree(tmpbuf);
2128*4882a593Smuzhiyun 	} else {
2129*4882a593Smuzhiyun 		/* Otherwise call the internal function which uses SPU hw */
2130*4882a593Smuzhiyun 		ret = __ahash_update(req);
2131*4882a593Smuzhiyun 	}
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 	return ret;
2134*4882a593Smuzhiyun }
2135*4882a593Smuzhiyun 
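/*
 * A minimal sketch of the "linearize, then hash" pattern ahash_update()
 * above uses when the SPU cannot do incremental hashing: flatten the
 * request scatterlist into one contiguous buffer and feed it to the
 * synchronous shash. sg_linearize_and_hash() is a hypothetical helper,
 * not part of this driver; it relies only on APIs this file already
 * uses (sg_nents, sg_copy_to_buffer, crypto_shash_update).
 */
static int sg_linearize_and_hash(struct shash_desc *desc,
				 struct scatterlist *src, unsigned int len,
				 gfp_t gfp)
{
	u8 *buf;
	int ret;

	buf = kmalloc(len, gfp);
	if (!buf)
		return -ENOMEM;

	/* Flatten the (possibly multi-entry) scatterlist */
	if (sg_copy_to_buffer(src, sg_nents(src), buf, len) != len) {
		kfree(buf);
		return -EINVAL;
	}

	ret = crypto_shash_update(desc, buf, len);
	kfree(buf);
	return ret;
}
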
2136*4882a593Smuzhiyun static int __ahash_final(struct ahash_request *req)
2137*4882a593Smuzhiyun {
2138*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun 	flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun 	rctx->is_final = 1;
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	return ahash_enqueue(req);
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun static int ahash_final(struct ahash_request *req)
2148*4882a593Smuzhiyun {
2149*4882a593Smuzhiyun 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2150*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2151*4882a593Smuzhiyun 	int ret;
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun 	if (spu_no_incr_hash(ctx)) {
2154*4882a593Smuzhiyun 		/*
2155*4882a593Smuzhiyun 		 * If we get an incremental hashing request and it's not
2156*4882a593Smuzhiyun 		 * supported by the hardware, we need to handle it in software
2157*4882a593Smuzhiyun 		 * by calling synchronous hash functions.
2158*4882a593Smuzhiyun 		 */
2159*4882a593Smuzhiyun 		ret = crypto_shash_final(ctx->shash, req->result);
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun 		/* Done with hash, can deallocate it now */
2162*4882a593Smuzhiyun 		crypto_free_shash(ctx->shash->tfm);
2163*4882a593Smuzhiyun 		kfree(ctx->shash);
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 	} else {
2166*4882a593Smuzhiyun 		/* Otherwise call the internal function which uses SPU hw */
2167*4882a593Smuzhiyun 		ret = __ahash_final(req);
2168*4882a593Smuzhiyun 	}
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	return ret;
2171*4882a593Smuzhiyun }
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun static int __ahash_finup(struct ahash_request *req)
2174*4882a593Smuzhiyun {
2175*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	rctx->total_todo += req->nbytes;
2180*4882a593Smuzhiyun 	rctx->src_sent = 0;
2181*4882a593Smuzhiyun 	rctx->is_final = 1;
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	return ahash_enqueue(req);
2184*4882a593Smuzhiyun }
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun static int ahash_finup(struct ahash_request *req)
2187*4882a593Smuzhiyun {
2188*4882a593Smuzhiyun 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2189*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2190*4882a593Smuzhiyun 	u8 *tmpbuf;
2191*4882a593Smuzhiyun 	int ret;
2192*4882a593Smuzhiyun 	int nents;
2193*4882a593Smuzhiyun 	gfp_t gfp;
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 	if (spu_no_incr_hash(ctx)) {
2196*4882a593Smuzhiyun 		/*
2197*4882a593Smuzhiyun 		 * If we get an incremental hashing request and it's not
2198*4882a593Smuzhiyun 		 * supported by the hardware, we need to handle it in software
2199*4882a593Smuzhiyun 		 * by calling synchronous hash functions.
2200*4882a593Smuzhiyun 		 */
2201*4882a593Smuzhiyun 		if (req->src) {
2202*4882a593Smuzhiyun 			nents = sg_nents(req->src);
2203*4882a593Smuzhiyun 		} else {
2204*4882a593Smuzhiyun 			ret = -EINVAL;
2205*4882a593Smuzhiyun 			goto ahash_finup_exit;
2206*4882a593Smuzhiyun 		}
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 		/* Copy data from req scatterlist to tmp buffer */
2209*4882a593Smuzhiyun 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2210*4882a593Smuzhiyun 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2211*4882a593Smuzhiyun 		tmpbuf = kmalloc(req->nbytes, gfp);
2212*4882a593Smuzhiyun 		if (!tmpbuf) {
2213*4882a593Smuzhiyun 			ret = -ENOMEM;
2214*4882a593Smuzhiyun 			goto ahash_finup_exit;
2215*4882a593Smuzhiyun 		}
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2218*4882a593Smuzhiyun 				req->nbytes) {
2219*4882a593Smuzhiyun 			ret = -EINVAL;
2220*4882a593Smuzhiyun 			goto ahash_finup_free;
2221*4882a593Smuzhiyun 		}
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun 		/* Call synchronous finup */
2224*4882a593Smuzhiyun 		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2225*4882a593Smuzhiyun 					 req->result);
2226*4882a593Smuzhiyun 	} else {
2227*4882a593Smuzhiyun 		/* Otherwise call the internal function which uses SPU hw */
2228*4882a593Smuzhiyun 		return __ahash_finup(req);
2229*4882a593Smuzhiyun 	}
2230*4882a593Smuzhiyun ahash_finup_free:
2231*4882a593Smuzhiyun 	kfree(tmpbuf);
2232*4882a593Smuzhiyun 
2233*4882a593Smuzhiyun ahash_finup_exit:
2234*4882a593Smuzhiyun 	/* Done with hash, can deallocate it now */
2235*4882a593Smuzhiyun 	crypto_free_shash(ctx->shash->tfm);
2236*4882a593Smuzhiyun 	kfree(ctx->shash);
2237*4882a593Smuzhiyun 	return ret;
2238*4882a593Smuzhiyun }
2239*4882a593Smuzhiyun 
2240*4882a593Smuzhiyun static int ahash_digest(struct ahash_request *req)
2241*4882a593Smuzhiyun {
2242*4882a593Smuzhiyun 	int err;
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun 	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 	/* whole thing at once */
2247*4882a593Smuzhiyun 	err = __ahash_init(req);
2248*4882a593Smuzhiyun 	if (!err)
2249*4882a593Smuzhiyun 		err = __ahash_finup(req);
2250*4882a593Smuzhiyun 
2251*4882a593Smuzhiyun 	return err;
2252*4882a593Smuzhiyun }
2253*4882a593Smuzhiyun 
2254*4882a593Smuzhiyun static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2255*4882a593Smuzhiyun 			unsigned int keylen)
2256*4882a593Smuzhiyun {
2257*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun 	flow_log("%s() ahash:%p key:%p keylen:%u\n",
2260*4882a593Smuzhiyun 		 __func__, ahash, key, keylen);
2261*4882a593Smuzhiyun 	flow_dump("  key: ", key, keylen);
2262*4882a593Smuzhiyun 
2263*4882a593Smuzhiyun 	if (ctx->auth.alg == HASH_ALG_AES) {
2264*4882a593Smuzhiyun 		switch (keylen) {
2265*4882a593Smuzhiyun 		case AES_KEYSIZE_128:
2266*4882a593Smuzhiyun 			ctx->cipher_type = CIPHER_TYPE_AES128;
2267*4882a593Smuzhiyun 			break;
2268*4882a593Smuzhiyun 		case AES_KEYSIZE_192:
2269*4882a593Smuzhiyun 			ctx->cipher_type = CIPHER_TYPE_AES192;
2270*4882a593Smuzhiyun 			break;
2271*4882a593Smuzhiyun 		case AES_KEYSIZE_256:
2272*4882a593Smuzhiyun 			ctx->cipher_type = CIPHER_TYPE_AES256;
2273*4882a593Smuzhiyun 			break;
2274*4882a593Smuzhiyun 		default:
2275*4882a593Smuzhiyun 			pr_err("%s() Error: Invalid key length\n", __func__);
2276*4882a593Smuzhiyun 			return -EINVAL;
2277*4882a593Smuzhiyun 		}
2278*4882a593Smuzhiyun 	} else {
2279*4882a593Smuzhiyun 		pr_err("%s() Error: unknown hash alg\n", __func__);
2280*4882a593Smuzhiyun 		return -EINVAL;
2281*4882a593Smuzhiyun 	}
2282*4882a593Smuzhiyun 	memcpy(ctx->authkey, key, keylen);
2283*4882a593Smuzhiyun 	ctx->authkeylen = keylen;
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	return 0;
2286*4882a593Smuzhiyun }
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun static int ahash_export(struct ahash_request *req, void *out)
2289*4882a593Smuzhiyun {
2290*4882a593Smuzhiyun 	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2291*4882a593Smuzhiyun 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 	spu_exp->total_todo = rctx->total_todo;
2294*4882a593Smuzhiyun 	spu_exp->total_sent = rctx->total_sent;
2295*4882a593Smuzhiyun 	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2296*4882a593Smuzhiyun 	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2297*4882a593Smuzhiyun 	spu_exp->hash_carry_len = rctx->hash_carry_len;
2298*4882a593Smuzhiyun 	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun 	return 0;
2301*4882a593Smuzhiyun }
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun static int ahash_import(struct ahash_request *req, const void *in)
2304*4882a593Smuzhiyun {
2305*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2306*4882a593Smuzhiyun 	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun 	rctx->total_todo = spu_exp->total_todo;
2309*4882a593Smuzhiyun 	rctx->total_sent = spu_exp->total_sent;
2310*4882a593Smuzhiyun 	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2311*4882a593Smuzhiyun 	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2312*4882a593Smuzhiyun 	rctx->hash_carry_len = spu_exp->hash_carry_len;
2313*4882a593Smuzhiyun 	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 	return 0;
2316*4882a593Smuzhiyun }
2317*4882a593Smuzhiyun 
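/*
 * A sketch of how a caller round-trips the partial-hash state that
 * ahash_export()/ahash_import() above serialize. The state buffer must
 * be at least crypto_ahash_statesize() bytes. save_and_restore_state()
 * is a hypothetical caller, not part of this driver.
 */
static int save_and_restore_state(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int ret;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	ret = crypto_ahash_export(req, state);	/* snapshot partial hash */
	if (!ret)
		ret = crypto_ahash_import(req, state);	/* resume from it */

	kfree(state);
	return ret;
}
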
2318*4882a593Smuzhiyun static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2319*4882a593Smuzhiyun 			     unsigned int keylen)
2320*4882a593Smuzhiyun {
2321*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2322*4882a593Smuzhiyun 	unsigned int blocksize =
2323*4882a593Smuzhiyun 		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2324*4882a593Smuzhiyun 	unsigned int digestsize = crypto_ahash_digestsize(ahash);
2325*4882a593Smuzhiyun 	unsigned int index;
2326*4882a593Smuzhiyun 	int rc;
2327*4882a593Smuzhiyun 
2328*4882a593Smuzhiyun 	flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2329*4882a593Smuzhiyun 		 __func__, ahash, key, keylen, blocksize, digestsize);
2330*4882a593Smuzhiyun 	flow_dump("  key: ", key, keylen);
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	if (keylen > blocksize) {
2333*4882a593Smuzhiyun 		switch (ctx->auth.alg) {
2334*4882a593Smuzhiyun 		case HASH_ALG_MD5:
2335*4882a593Smuzhiyun 			rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2336*4882a593Smuzhiyun 				      0, NULL, 0);
2337*4882a593Smuzhiyun 			break;
2338*4882a593Smuzhiyun 		case HASH_ALG_SHA1:
2339*4882a593Smuzhiyun 			rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2340*4882a593Smuzhiyun 				      0, NULL, 0);
2341*4882a593Smuzhiyun 			break;
2342*4882a593Smuzhiyun 		case HASH_ALG_SHA224:
2343*4882a593Smuzhiyun 			rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2344*4882a593Smuzhiyun 				      0, NULL, 0);
2345*4882a593Smuzhiyun 			break;
2346*4882a593Smuzhiyun 		case HASH_ALG_SHA256:
2347*4882a593Smuzhiyun 			rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2348*4882a593Smuzhiyun 				      0, NULL, 0);
2349*4882a593Smuzhiyun 			break;
2350*4882a593Smuzhiyun 		case HASH_ALG_SHA384:
2351*4882a593Smuzhiyun 			rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2352*4882a593Smuzhiyun 				      0, NULL, 0);
2353*4882a593Smuzhiyun 			break;
2354*4882a593Smuzhiyun 		case HASH_ALG_SHA512:
2355*4882a593Smuzhiyun 			rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2356*4882a593Smuzhiyun 				      0, NULL, 0);
2357*4882a593Smuzhiyun 			break;
2358*4882a593Smuzhiyun 		case HASH_ALG_SHA3_224:
2359*4882a593Smuzhiyun 			rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2360*4882a593Smuzhiyun 				      NULL, 0, NULL, 0);
2361*4882a593Smuzhiyun 			break;
2362*4882a593Smuzhiyun 		case HASH_ALG_SHA3_256:
2363*4882a593Smuzhiyun 			rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2364*4882a593Smuzhiyun 				      NULL, 0, NULL, 0);
2365*4882a593Smuzhiyun 			break;
2366*4882a593Smuzhiyun 		case HASH_ALG_SHA3_384:
2367*4882a593Smuzhiyun 			rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2368*4882a593Smuzhiyun 				      NULL, 0, NULL, 0);
2369*4882a593Smuzhiyun 			break;
2370*4882a593Smuzhiyun 		case HASH_ALG_SHA3_512:
2371*4882a593Smuzhiyun 			rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2372*4882a593Smuzhiyun 				      NULL, 0, NULL, 0);
2373*4882a593Smuzhiyun 			break;
2374*4882a593Smuzhiyun 		default:
2375*4882a593Smuzhiyun 			pr_err("%s() Error: unknown hash alg\n", __func__);
2376*4882a593Smuzhiyun 			return -EINVAL;
2377*4882a593Smuzhiyun 		}
2378*4882a593Smuzhiyun 		if (rc < 0) {
2379*4882a593Smuzhiyun 			pr_err("%s() Error %d computing shash for %s\n",
2380*4882a593Smuzhiyun 			       __func__, rc, hash_alg_name[ctx->auth.alg]);
2381*4882a593Smuzhiyun 			return rc;
2382*4882a593Smuzhiyun 		}
2383*4882a593Smuzhiyun 		ctx->authkeylen = digestsize;
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 		flow_log("  keylen > blocksize... hashed\n");
2386*4882a593Smuzhiyun 		flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
2387*4882a593Smuzhiyun 	} else {
2388*4882a593Smuzhiyun 		memcpy(ctx->authkey, key, keylen);
2389*4882a593Smuzhiyun 		ctx->authkeylen = keylen;
2390*4882a593Smuzhiyun 	}
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 	/*
2393*4882a593Smuzhiyun 	 * The full HMAC operation on SPU-M is not verified,
2394*4882a593Smuzhiyun 	 * so keep the generation of IPAD, OPAD and the
2395*4882a593Smuzhiyun 	 * outer hash in software.
2396*4882a593Smuzhiyun 	 */
2397*4882a593Smuzhiyun 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2398*4882a593Smuzhiyun 		memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2399*4882a593Smuzhiyun 		memset(ctx->ipad + ctx->authkeylen, 0,
2400*4882a593Smuzhiyun 		       blocksize - ctx->authkeylen);
2401*4882a593Smuzhiyun 		ctx->authkeylen = 0;
2402*4882a593Smuzhiyun 		memcpy(ctx->opad, ctx->ipad, blocksize);
2403*4882a593Smuzhiyun 
2404*4882a593Smuzhiyun 		for (index = 0; index < blocksize; index++) {
2405*4882a593Smuzhiyun 			ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2406*4882a593Smuzhiyun 			ctx->opad[index] ^= HMAC_OPAD_VALUE;
2407*4882a593Smuzhiyun 		}
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun 		flow_dump("  ipad: ", ctx->ipad, blocksize);
2410*4882a593Smuzhiyun 		flow_dump("  opad: ", ctx->opad, blocksize);
2411*4882a593Smuzhiyun 	}
2412*4882a593Smuzhiyun 	ctx->digestsize = digestsize;
2413*4882a593Smuzhiyun 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2414*4882a593Smuzhiyun 
2415*4882a593Smuzhiyun 	return 0;
2416*4882a593Smuzhiyun }
2417*4882a593Smuzhiyun 
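/*
 * A compact restatement of the RFC 2104 key preprocessing performed in
 * ahash_hmac_setkey() above: zero-pad the (possibly pre-hashed) key to
 * one block, then XOR with the HMAC pad constants. hmac_make_pads() is
 * a hypothetical helper, not part of this driver, and assumes
 * keylen <= blocksize (longer keys were already hashed down above).
 */
static void hmac_make_pads(const u8 *key, unsigned int keylen,
			   unsigned int blocksize, u8 *ipad, u8 *opad)
{
	unsigned int i;

	/* K0 = key zero-padded to the hash block size */
	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	/* inner pad = K0 ^ 0x36.., outer pad = K0 ^ 0x5c.. */
	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}
}
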
2418*4882a593Smuzhiyun static int ahash_hmac_init(struct ahash_request *req)
2419*4882a593Smuzhiyun {
2420*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2421*4882a593Smuzhiyun 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2422*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2423*4882a593Smuzhiyun 	unsigned int blocksize =
2424*4882a593Smuzhiyun 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun 	flow_log("ahash_hmac_init()\n");
2427*4882a593Smuzhiyun 
2428*4882a593Smuzhiyun 	/* init the context as a hash */
2429*4882a593Smuzhiyun 	ahash_init(req);
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 	if (!spu_no_incr_hash(ctx)) {
2432*4882a593Smuzhiyun 		/* SPU-M can do incr hashing but needs sw for outer HMAC */
2433*4882a593Smuzhiyun 		rctx->is_sw_hmac = true;
2434*4882a593Smuzhiyun 		ctx->auth.mode = HASH_MODE_HASH;
2435*4882a593Smuzhiyun 		/* start with a prepended ipad */
2436*4882a593Smuzhiyun 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2437*4882a593Smuzhiyun 		rctx->hash_carry_len = blocksize;
2438*4882a593Smuzhiyun 		rctx->total_todo += blocksize;
2439*4882a593Smuzhiyun 	}
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	return 0;
2442*4882a593Smuzhiyun }
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun static int ahash_hmac_update(struct ahash_request *req)
2445*4882a593Smuzhiyun {
2446*4882a593Smuzhiyun 	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2447*4882a593Smuzhiyun 
2448*4882a593Smuzhiyun 	if (!req->nbytes)
2449*4882a593Smuzhiyun 		return 0;
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	return ahash_update(req);
2452*4882a593Smuzhiyun }
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun static int ahash_hmac_final(struct ahash_request *req)
2455*4882a593Smuzhiyun {
2456*4882a593Smuzhiyun 	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2457*4882a593Smuzhiyun 
2458*4882a593Smuzhiyun 	return ahash_final(req);
2459*4882a593Smuzhiyun }
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun static int ahash_hmac_finup(struct ahash_request *req)
2462*4882a593Smuzhiyun {
2463*4882a593Smuzhiyun 	flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2464*4882a593Smuzhiyun 
2465*4882a593Smuzhiyun 	return ahash_finup(req);
2466*4882a593Smuzhiyun }
2467*4882a593Smuzhiyun 
2468*4882a593Smuzhiyun static int ahash_hmac_digest(struct ahash_request *req)
2469*4882a593Smuzhiyun {
2470*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2471*4882a593Smuzhiyun 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2472*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2473*4882a593Smuzhiyun 	unsigned int blocksize =
2474*4882a593Smuzhiyun 			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2477*4882a593Smuzhiyun 
2478*4882a593Smuzhiyun 	/* Perform initialization and then call finup */
2479*4882a593Smuzhiyun 	__ahash_init(req);
2480*4882a593Smuzhiyun 
2481*4882a593Smuzhiyun 	if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2482*4882a593Smuzhiyun 		/*
2483*4882a593Smuzhiyun 		 * SPU2 supports the full HMAC implementation in
2484*4882a593Smuzhiyun 		 * hardware, so there is no need to generate IPAD,
2485*4882a593Smuzhiyun 		 * OPAD, or the outer hash in software.
2486*4882a593Smuzhiyun 		 * Only when the hash key is longer than the hash
2487*4882a593Smuzhiyun 		 * block size does SPU2 expect the key to be hashed
2488*4882a593Smuzhiyun 		 * down to digest size and fed in as the hash key.
2489*4882a593Smuzhiyun 		 */
2490*4882a593Smuzhiyun 		rctx->is_sw_hmac = false;
2491*4882a593Smuzhiyun 		ctx->auth.mode = HASH_MODE_HMAC;
2492*4882a593Smuzhiyun 	} else {
2493*4882a593Smuzhiyun 		rctx->is_sw_hmac = true;
2494*4882a593Smuzhiyun 		ctx->auth.mode = HASH_MODE_HASH;
2495*4882a593Smuzhiyun 		/* start with a prepended ipad */
2496*4882a593Smuzhiyun 		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2497*4882a593Smuzhiyun 		rctx->hash_carry_len = blocksize;
2498*4882a593Smuzhiyun 		rctx->total_todo += blocksize;
2499*4882a593Smuzhiyun 	}
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun 	return __ahash_finup(req);
2502*4882a593Smuzhiyun }
2503*4882a593Smuzhiyun 
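/*
 * The software/hardware HMAC split above follows from the definition
 * HMAC(K, m) = H((K0 ^ opad) || H((K0 ^ ipad) || m)). When is_sw_hmac
 * is set, the SPU computes only the inner H() over ipad || m and the
 * outer pass runs in software. A sketch of that outer pass over a
 * synchronous shash (hmac_outer_hash() is a hypothetical helper, not
 * part of this driver):
 */
static int hmac_outer_hash(struct shash_desc *desc, const u8 *opad,
			   unsigned int blocksize, const u8 *inner_digest,
			   unsigned int digestsize, u8 *out)
{
	int ret;

	ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, opad, blocksize);
	if (!ret)
		ret = crypto_shash_finup(desc, inner_digest, digestsize, out);
	return ret;
}
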
2504*4882a593Smuzhiyun /* aead helpers */
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun static int aead_need_fallback(struct aead_request *req)
2507*4882a593Smuzhiyun {
2508*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2509*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
2510*4882a593Smuzhiyun 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2511*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2512*4882a593Smuzhiyun 	u32 payload_len;
2513*4882a593Smuzhiyun 
2514*4882a593Smuzhiyun 	/*
2515*4882a593Smuzhiyun 	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
2516*4882a593Smuzhiyun 	 * and AAD are both 0 bytes long. So use fallback in this case.
2517*4882a593Smuzhiyun 	 */
2518*4882a593Smuzhiyun 	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2519*4882a593Smuzhiyun 	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2520*4882a593Smuzhiyun 	    (req->assoclen == 0)) {
2521*4882a593Smuzhiyun 		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2522*4882a593Smuzhiyun 		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2523*4882a593Smuzhiyun 			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2524*4882a593Smuzhiyun 			return 1;
2525*4882a593Smuzhiyun 		}
2526*4882a593Smuzhiyun 	}
2527*4882a593Smuzhiyun 
2528*4882a593Smuzhiyun 	/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
2529*4882a593Smuzhiyun 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2530*4882a593Smuzhiyun 	    (spu->spu_type == SPU_TYPE_SPUM) &&
2531*4882a593Smuzhiyun 	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2532*4882a593Smuzhiyun 	    (ctx->digestsize != 16)) {
2533*4882a593Smuzhiyun 		flow_log("%s() AES CCM needs fallback for digest size %d\n",
2534*4882a593Smuzhiyun 			 __func__, ctx->digestsize);
2535*4882a593Smuzhiyun 		return 1;
2536*4882a593Smuzhiyun 	}
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	/*
2539*4882a593Smuzhiyun 	 * SPU-M on NSP has an issue where AES-CCM hash is not correct
2540*4882a593Smuzhiyun 	 * when AAD size is 0
2541*4882a593Smuzhiyun 	 */
2542*4882a593Smuzhiyun 	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2543*4882a593Smuzhiyun 	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2544*4882a593Smuzhiyun 	    (req->assoclen == 0)) {
2545*4882a593Smuzhiyun 		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2546*4882a593Smuzhiyun 			 __func__);
2547*4882a593Smuzhiyun 		return 1;
2548*4882a593Smuzhiyun 	}
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 	/*
2551*4882a593Smuzhiyun 	 * RFC4106 and RFC4543 cannot handle the case where AAD is other than
2552*4882a593Smuzhiyun 	 * 16 or 20 bytes long. So use fallback in this case.
2553*4882a593Smuzhiyun 	 */
2554*4882a593Smuzhiyun 	if (ctx->cipher.mode == CIPHER_MODE_GCM &&
2555*4882a593Smuzhiyun 	    ctx->cipher.alg == CIPHER_ALG_AES &&
2556*4882a593Smuzhiyun 	    rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2557*4882a593Smuzhiyun 	    req->assoclen != 16 && req->assoclen != 20) {
2558*4882a593Smuzhiyun 		flow_log("RFC4106/RFC4543 needs fallback for assoclen other than 16 or 20 bytes\n");
2560*4882a593Smuzhiyun 		return 1;
2561*4882a593Smuzhiyun 	}
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 	payload_len = req->cryptlen;
2564*4882a593Smuzhiyun 	if (spu->spu_type == SPU_TYPE_SPUM)
2565*4882a593Smuzhiyun 		payload_len += req->assoclen;
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun 	flow_log("%s() payload len: %u\n", __func__, payload_len);
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2570*4882a593Smuzhiyun 		return 0;
2571*4882a593Smuzhiyun 	else
2572*4882a593Smuzhiyun 		return payload_len > ctx->max_payload;
2573*4882a593Smuzhiyun }
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun static void aead_complete(struct crypto_async_request *areq, int err)
2576*4882a593Smuzhiyun {
2577*4882a593Smuzhiyun 	struct aead_request *req =
2578*4882a593Smuzhiyun 	    container_of(areq, struct aead_request, base);
2579*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2580*4882a593Smuzhiyun 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2581*4882a593Smuzhiyun 
2582*4882a593Smuzhiyun 	flow_log("%s() err:%d\n", __func__, err);
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun 	areq->tfm = crypto_aead_tfm(aead);
2585*4882a593Smuzhiyun 
2586*4882a593Smuzhiyun 	areq->complete = rctx->old_complete;
2587*4882a593Smuzhiyun 	areq->data = rctx->old_data;
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun 	areq->complete(areq, err);
2590*4882a593Smuzhiyun }
2591*4882a593Smuzhiyun 
2592*4882a593Smuzhiyun static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2593*4882a593Smuzhiyun {
2594*4882a593Smuzhiyun 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2595*4882a593Smuzhiyun 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2596*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2597*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2598*4882a593Smuzhiyun 	int err;
2599*4882a593Smuzhiyun 	u32 req_flags;
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2602*4882a593Smuzhiyun 
2603*4882a593Smuzhiyun 	if (ctx->fallback_cipher) {
2604*4882a593Smuzhiyun 		/* Store the cipher tfm and then use the fallback tfm */
2605*4882a593Smuzhiyun 		rctx->old_tfm = tfm;
2606*4882a593Smuzhiyun 		aead_request_set_tfm(req, ctx->fallback_cipher);
2607*4882a593Smuzhiyun 		/*
2608*4882a593Smuzhiyun 		 * Save the callback and chain ourselves in, so we can restore
2609*4882a593Smuzhiyun 		 * the tfm
2610*4882a593Smuzhiyun 		 */
2611*4882a593Smuzhiyun 		rctx->old_complete = req->base.complete;
2612*4882a593Smuzhiyun 		rctx->old_data = req->base.data;
2613*4882a593Smuzhiyun 		req_flags = aead_request_flags(req);
2614*4882a593Smuzhiyun 		aead_request_set_callback(req, req_flags, aead_complete, req);
2615*4882a593Smuzhiyun 		err = is_encrypt ? crypto_aead_encrypt(req) :
2616*4882a593Smuzhiyun 		    crypto_aead_decrypt(req);
2617*4882a593Smuzhiyun 
2618*4882a593Smuzhiyun 		if (err == 0) {
2619*4882a593Smuzhiyun 			/*
2620*4882a593Smuzhiyun 			 * fallback was synchronous (did not return
2621*4882a593Smuzhiyun 			 * -EINPROGRESS). So restore request state here.
2622*4882a593Smuzhiyun 			 */
2623*4882a593Smuzhiyun 			aead_request_set_callback(req, req_flags,
2624*4882a593Smuzhiyun 						  rctx->old_complete, req);
2625*4882a593Smuzhiyun 			req->base.data = rctx->old_data;
2626*4882a593Smuzhiyun 			aead_request_set_tfm(req, aead);
2627*4882a593Smuzhiyun 			flow_log("%s() fallback completed successfully\n\n",
2628*4882a593Smuzhiyun 				 __func__);
2629*4882a593Smuzhiyun 		}
2630*4882a593Smuzhiyun 	} else {
2631*4882a593Smuzhiyun 		err = -EINVAL;
2632*4882a593Smuzhiyun 	}
2633*4882a593Smuzhiyun 
2634*4882a593Smuzhiyun 	return err;
2635*4882a593Smuzhiyun }
2636*4882a593Smuzhiyun 
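/*
 * The save/chain/restore dance in aead_do_fallback() above assumes
 * ctx->fallback_cipher was allocated when the tfm was set up. A hedged
 * sketch of a typical allocation (this driver's actual init function
 * is outside this excerpt; alloc_aead_fallback() is hypothetical):
 */
static struct crypto_aead *alloc_aead_fallback(struct crypto_aead *aead)
{
	const char *name = crypto_tfm_alg_name(crypto_aead_tfm(aead));

	/* Skip implementations that themselves need a fallback */
	return crypto_alloc_aead(name, 0, CRYPTO_ALG_NEED_FALLBACK);
}
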
2637*4882a593Smuzhiyun static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2638*4882a593Smuzhiyun {
2639*4882a593Smuzhiyun 	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2640*4882a593Smuzhiyun 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2641*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2642*4882a593Smuzhiyun 	int err;
2643*4882a593Smuzhiyun 
2644*4882a593Smuzhiyun 	flow_log("%s() enc:%u\n", __func__, is_encrypt);
2645*4882a593Smuzhiyun 
2646*4882a593Smuzhiyun 	if (req->assoclen > MAX_ASSOC_SIZE) {
2647*4882a593Smuzhiyun 		pr_err("%s() Error: associated data too long. (%u > %u bytes)\n",
2648*4882a593Smuzhiyun 		       __func__, req->assoclen, MAX_ASSOC_SIZE);
2650*4882a593Smuzhiyun 		return -EINVAL;
2651*4882a593Smuzhiyun 	}
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2654*4882a593Smuzhiyun 		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2655*4882a593Smuzhiyun 	rctx->parent = &req->base;
2656*4882a593Smuzhiyun 	rctx->is_encrypt = is_encrypt;
2657*4882a593Smuzhiyun 	rctx->bd_suppress = false;
2658*4882a593Smuzhiyun 	rctx->total_todo = req->cryptlen;
2659*4882a593Smuzhiyun 	rctx->src_sent = 0;
2660*4882a593Smuzhiyun 	rctx->total_sent = 0;
2661*4882a593Smuzhiyun 	rctx->total_received = 0;
2662*4882a593Smuzhiyun 	rctx->is_sw_hmac = false;
2663*4882a593Smuzhiyun 	rctx->ctx = ctx;
2664*4882a593Smuzhiyun 	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2665*4882a593Smuzhiyun 
2666*4882a593Smuzhiyun 	/* assoc data is at start of src sg */
2667*4882a593Smuzhiyun 	rctx->assoc = req->src;
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun 	/*
2670*4882a593Smuzhiyun 	 * Init current position in src scatterlist to be after assoc data.
2671*4882a593Smuzhiyun 	 * src_skip set to buffer offset where data begins. (Assoc data could
2672*4882a593Smuzhiyun 	 * end in the middle of a buffer.)
2673*4882a593Smuzhiyun 	 */
2674*4882a593Smuzhiyun 	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2675*4882a593Smuzhiyun 			     &rctx->src_skip) < 0) {
2676*4882a593Smuzhiyun 		pr_err("%s() Error: Unable to find start of src data\n",
2677*4882a593Smuzhiyun 		       __func__);
2678*4882a593Smuzhiyun 		return -EINVAL;
2679*4882a593Smuzhiyun 	}
2680*4882a593Smuzhiyun 
2681*4882a593Smuzhiyun 	rctx->src_nents = 0;
2682*4882a593Smuzhiyun 	rctx->dst_nents = 0;
2683*4882a593Smuzhiyun 	if (req->dst == req->src) {
2684*4882a593Smuzhiyun 		rctx->dst_sg = rctx->src_sg;
2685*4882a593Smuzhiyun 		rctx->dst_skip = rctx->src_skip;
2686*4882a593Smuzhiyun 	} else {
2687*4882a593Smuzhiyun 		/*
2688*4882a593Smuzhiyun 		 * Expect req->dst to have room for assoc data followed by
2689*4882a593Smuzhiyun 		 * output data and ICV, if encrypt. So initialize dst_sg
2690*4882a593Smuzhiyun 		 * to point beyond assoc len offset.
2691*4882a593Smuzhiyun 		 */
2692*4882a593Smuzhiyun 		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2693*4882a593Smuzhiyun 				     &rctx->dst_skip) < 0) {
2694*4882a593Smuzhiyun 			pr_err("%s() Error: Unable to find start of dst data\n",
2695*4882a593Smuzhiyun 			       __func__);
2696*4882a593Smuzhiyun 			return -EINVAL;
2697*4882a593Smuzhiyun 		}
2698*4882a593Smuzhiyun 	}
2699*4882a593Smuzhiyun 
2700*4882a593Smuzhiyun 	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2701*4882a593Smuzhiyun 	    ctx->cipher.mode == CIPHER_MODE_CTR ||
2702*4882a593Smuzhiyun 	    ctx->cipher.mode == CIPHER_MODE_OFB ||
2703*4882a593Smuzhiyun 	    ctx->cipher.mode == CIPHER_MODE_XTS ||
2704*4882a593Smuzhiyun 	    ctx->cipher.mode == CIPHER_MODE_GCM) {
2705*4882a593Smuzhiyun 		rctx->iv_ctr_len =
2706*4882a593Smuzhiyun 			ctx->salt_len +
2707*4882a593Smuzhiyun 			crypto_aead_ivsize(crypto_aead_reqtfm(req));
2708*4882a593Smuzhiyun 	} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2709*4882a593Smuzhiyun 		rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2710*4882a593Smuzhiyun 	} else {
2711*4882a593Smuzhiyun 		rctx->iv_ctr_len = 0;
2712*4882a593Smuzhiyun 	}
2713*4882a593Smuzhiyun 
2714*4882a593Smuzhiyun 	rctx->hash_carry_len = 0;
2715*4882a593Smuzhiyun 
2716*4882a593Smuzhiyun 	flow_log("  src sg: %p\n", req->src);
2717*4882a593Smuzhiyun 	flow_log("  rctx->src_sg: %p, src_skip %u\n",
2718*4882a593Smuzhiyun 		 rctx->src_sg, rctx->src_skip);
2719*4882a593Smuzhiyun 	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
2720*4882a593Smuzhiyun 	flow_log("  dst sg: %p\n", req->dst);
2721*4882a593Smuzhiyun 	flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
2722*4882a593Smuzhiyun 		 rctx->dst_sg, rctx->dst_skip);
2723*4882a593Smuzhiyun 	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
2724*4882a593Smuzhiyun 	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
2725*4882a593Smuzhiyun 	flow_log("  authkeylen:%u\n", ctx->authkeylen);
2726*4882a593Smuzhiyun 	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2727*4882a593Smuzhiyun 
2728*4882a593Smuzhiyun 	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2729*4882a593Smuzhiyun 		flow_log("  max_payload infinite\n");
2730*4882a593Smuzhiyun 	else
2731*4882a593Smuzhiyun 		flow_log("  max_payload: %u\n", ctx->max_payload);
2732*4882a593Smuzhiyun 
2733*4882a593Smuzhiyun 	if (unlikely(aead_need_fallback(req)))
2734*4882a593Smuzhiyun 		return aead_do_fallback(req, is_encrypt);
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun 	/*
2737*4882a593Smuzhiyun 	 * Do memory allocations for request after fallback check, because if we
2738*4882a593Smuzhiyun 	 * do fallback, we won't call finish_req() to dealloc.
2739*4882a593Smuzhiyun 	 */
2740*4882a593Smuzhiyun 	if (rctx->iv_ctr_len) {
2741*4882a593Smuzhiyun 		if (ctx->salt_len)
2742*4882a593Smuzhiyun 			memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2743*4882a593Smuzhiyun 			       ctx->salt, ctx->salt_len);
2744*4882a593Smuzhiyun 		memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2745*4882a593Smuzhiyun 		       req->iv,
2746*4882a593Smuzhiyun 		       rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2747*4882a593Smuzhiyun 	}
2748*4882a593Smuzhiyun 
2749*4882a593Smuzhiyun 	rctx->chan_idx = select_channel();
2750*4882a593Smuzhiyun 	err = handle_aead_req(rctx);
2751*4882a593Smuzhiyun 	if (err != -EINPROGRESS)
2752*4882a593Smuzhiyun 		/* synchronous result */
2753*4882a593Smuzhiyun 		spu_chunk_cleanup(rctx);
2754*4882a593Smuzhiyun 
2755*4882a593Smuzhiyun 	return err;
2756*4882a593Smuzhiyun }
2757*4882a593Smuzhiyun 
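/*
 * For the ESP variants, the iv_ctr assembly in aead_enqueue() above
 * builds the per-request nonce as the salt captured at setkey time
 * followed by the explicit IV carried in each request. A sketch for
 * the RFC4106 layout (rfc4106_build_nonce() is a hypothetical helper,
 * not part of this driver):
 */
static void rfc4106_build_nonce(u8 *nonce, const u8 *salt, const u8 *iv)
{
	/* implicit 4-byte salt taken from the tail of the key blob */
	memcpy(nonce, salt, GCM_ESP_SALT_SIZE);
	/* 8-byte explicit IV from the request */
	memcpy(nonce + GCM_ESP_SALT_SIZE, iv, GCM_RFC4106_IV_SIZE);
}
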
2758*4882a593Smuzhiyun static int aead_authenc_setkey(struct crypto_aead *cipher,
2759*4882a593Smuzhiyun 			       const u8 *key, unsigned int keylen)
2760*4882a593Smuzhiyun {
2761*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
2762*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2763*4882a593Smuzhiyun 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2764*4882a593Smuzhiyun 	struct crypto_authenc_keys keys;
2765*4882a593Smuzhiyun 	int ret;
2766*4882a593Smuzhiyun 
2767*4882a593Smuzhiyun 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2768*4882a593Smuzhiyun 		 keylen);
2769*4882a593Smuzhiyun 	flow_dump("  key: ", key, keylen);
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
2772*4882a593Smuzhiyun 	if (ret)
2773*4882a593Smuzhiyun 		goto badkey;
2774*4882a593Smuzhiyun 
2775*4882a593Smuzhiyun 	if (keys.enckeylen > MAX_KEY_SIZE ||
2776*4882a593Smuzhiyun 	    keys.authkeylen > MAX_KEY_SIZE)
2777*4882a593Smuzhiyun 		goto badkey;
2778*4882a593Smuzhiyun 
2779*4882a593Smuzhiyun 	ctx->enckeylen = keys.enckeylen;
2780*4882a593Smuzhiyun 	ctx->authkeylen = keys.authkeylen;
2781*4882a593Smuzhiyun 
2782*4882a593Smuzhiyun 	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2783*4882a593Smuzhiyun 	/* May end up padding auth key. So make sure it's zeroed. */
2784*4882a593Smuzhiyun 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
2785*4882a593Smuzhiyun 	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun 	switch (ctx->alg->cipher_info.alg) {
2788*4882a593Smuzhiyun 	case CIPHER_ALG_DES:
2789*4882a593Smuzhiyun 		if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
2790*4882a593Smuzhiyun 			return -EINVAL;
2791*4882a593Smuzhiyun 
2792*4882a593Smuzhiyun 		ctx->cipher_type = CIPHER_TYPE_DES;
2793*4882a593Smuzhiyun 		break;
2794*4882a593Smuzhiyun 	case CIPHER_ALG_3DES:
2795*4882a593Smuzhiyun 		if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
2796*4882a593Smuzhiyun 			return -EINVAL;
2797*4882a593Smuzhiyun 
2798*4882a593Smuzhiyun 		ctx->cipher_type = CIPHER_TYPE_3DES;
2799*4882a593Smuzhiyun 		break;
2800*4882a593Smuzhiyun 	case CIPHER_ALG_AES:
2801*4882a593Smuzhiyun 		switch (ctx->enckeylen) {
2802*4882a593Smuzhiyun 		case AES_KEYSIZE_128:
2803*4882a593Smuzhiyun 			ctx->cipher_type = CIPHER_TYPE_AES128;
2804*4882a593Smuzhiyun 			break;
2805*4882a593Smuzhiyun 		case AES_KEYSIZE_192:
2806*4882a593Smuzhiyun 			ctx->cipher_type = CIPHER_TYPE_AES192;
2807*4882a593Smuzhiyun 			break;
2808*4882a593Smuzhiyun 		case AES_KEYSIZE_256:
2809*4882a593Smuzhiyun 			ctx->cipher_type = CIPHER_TYPE_AES256;
2810*4882a593Smuzhiyun 			break;
2811*4882a593Smuzhiyun 		default:
2812*4882a593Smuzhiyun 			goto badkey;
2813*4882a593Smuzhiyun 		}
2814*4882a593Smuzhiyun 		break;
2815*4882a593Smuzhiyun 	default:
2816*4882a593Smuzhiyun 		pr_err("%s() Error: Unknown cipher alg\n", __func__);
2817*4882a593Smuzhiyun 		return -EINVAL;
2818*4882a593Smuzhiyun 	}
2819*4882a593Smuzhiyun 
2820*4882a593Smuzhiyun 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2821*4882a593Smuzhiyun 		 ctx->authkeylen);
2822*4882a593Smuzhiyun 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
2823*4882a593Smuzhiyun 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
2824*4882a593Smuzhiyun 
2825*4882a593Smuzhiyun 	/* setkey the fallback just in case we need to use it */
2826*4882a593Smuzhiyun 	if (ctx->fallback_cipher) {
2827*4882a593Smuzhiyun 		flow_log("  running fallback setkey()\n");
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2830*4882a593Smuzhiyun 		ctx->fallback_cipher->base.crt_flags |=
2831*4882a593Smuzhiyun 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2832*4882a593Smuzhiyun 		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2833*4882a593Smuzhiyun 		if (ret)
2834*4882a593Smuzhiyun 			flow_log("  fallback setkey() returned:%d\n", ret);
2835*4882a593Smuzhiyun 	}
2836*4882a593Smuzhiyun 
2837*4882a593Smuzhiyun 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2838*4882a593Smuzhiyun 							  ctx->enckeylen,
2839*4882a593Smuzhiyun 							  false);
2840*4882a593Smuzhiyun 
2841*4882a593Smuzhiyun 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2842*4882a593Smuzhiyun 
2843*4882a593Smuzhiyun 	return ret;
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun badkey:
2846*4882a593Smuzhiyun 	ctx->enckeylen = 0;
2847*4882a593Smuzhiyun 	ctx->authkeylen = 0;
2848*4882a593Smuzhiyun 	ctx->digestsize = 0;
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 	return -EINVAL;
2851*4882a593Smuzhiyun }
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2854*4882a593Smuzhiyun 			       const u8 *key, unsigned int keylen)
2855*4882a593Smuzhiyun {
2856*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
2857*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2858*4882a593Smuzhiyun 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2859*4882a593Smuzhiyun 
2860*4882a593Smuzhiyun 	int ret = 0;
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	flow_log("%s() keylen:%u\n", __func__, keylen);
2863*4882a593Smuzhiyun 	flow_dump("  key: ", key, keylen);
2864*4882a593Smuzhiyun 
2865*4882a593Smuzhiyun 	if (!ctx->is_esp)
2866*4882a593Smuzhiyun 		ctx->digestsize = keylen;
2867*4882a593Smuzhiyun 
2868*4882a593Smuzhiyun 	ctx->enckeylen = keylen;
2869*4882a593Smuzhiyun 	ctx->authkeylen = 0;
2870*4882a593Smuzhiyun 
2871*4882a593Smuzhiyun 	switch (ctx->enckeylen) {
2872*4882a593Smuzhiyun 	case AES_KEYSIZE_128:
2873*4882a593Smuzhiyun 		ctx->cipher_type = CIPHER_TYPE_AES128;
2874*4882a593Smuzhiyun 		break;
2875*4882a593Smuzhiyun 	case AES_KEYSIZE_192:
2876*4882a593Smuzhiyun 		ctx->cipher_type = CIPHER_TYPE_AES192;
2877*4882a593Smuzhiyun 		break;
2878*4882a593Smuzhiyun 	case AES_KEYSIZE_256:
2879*4882a593Smuzhiyun 		ctx->cipher_type = CIPHER_TYPE_AES256;
2880*4882a593Smuzhiyun 		break;
2881*4882a593Smuzhiyun 	default:
2882*4882a593Smuzhiyun 		goto badkey;
2883*4882a593Smuzhiyun 	}
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun 	memcpy(ctx->enckey, key, ctx->enckeylen);
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2888*4882a593Smuzhiyun 		 ctx->authkeylen);
2889*4882a593Smuzhiyun 	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
2890*4882a593Smuzhiyun 	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
2891*4882a593Smuzhiyun 
2892*4882a593Smuzhiyun 	/* setkey the fallback just in case we need to use it */
2893*4882a593Smuzhiyun 	if (ctx->fallback_cipher) {
2894*4882a593Smuzhiyun 		flow_log("  running fallback setkey()\n");
2895*4882a593Smuzhiyun 
2896*4882a593Smuzhiyun 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2897*4882a593Smuzhiyun 		ctx->fallback_cipher->base.crt_flags |=
2898*4882a593Smuzhiyun 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2899*4882a593Smuzhiyun 		ret = crypto_aead_setkey(ctx->fallback_cipher, key,
2900*4882a593Smuzhiyun 					 keylen + ctx->salt_len);
2901*4882a593Smuzhiyun 		if (ret)
2902*4882a593Smuzhiyun 			flow_log("  fallback setkey() returned:%d\n", ret);
2903*4882a593Smuzhiyun 	}
2904*4882a593Smuzhiyun 
2905*4882a593Smuzhiyun 	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2906*4882a593Smuzhiyun 							  ctx->enckeylen,
2907*4882a593Smuzhiyun 							  false);
2908*4882a593Smuzhiyun 
2909*4882a593Smuzhiyun 	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun 	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2912*4882a593Smuzhiyun 		 ctx->authkeylen);
2913*4882a593Smuzhiyun 
2914*4882a593Smuzhiyun 	return ret;
2915*4882a593Smuzhiyun 
2916*4882a593Smuzhiyun badkey:
2917*4882a593Smuzhiyun 	ctx->enckeylen = 0;
2918*4882a593Smuzhiyun 	ctx->authkeylen = 0;
2919*4882a593Smuzhiyun 	ctx->digestsize = 0;
2920*4882a593Smuzhiyun 
2921*4882a593Smuzhiyun 	return -EINVAL;
2922*4882a593Smuzhiyun }
2923*4882a593Smuzhiyun 
2924*4882a593Smuzhiyun /**
2925*4882a593Smuzhiyun  * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
2926*4882a593Smuzhiyun  * @cipher: AEAD structure
2927*4882a593Smuzhiyun  * @key:    Key followed by 4 bytes of salt
2928*4882a593Smuzhiyun  * @keylen: Length of key plus salt, in bytes
2929*4882a593Smuzhiyun  *
2930*4882a593Smuzhiyun  * Extracts salt from key and stores it to be prepended to IV on each request.
2931*4882a593Smuzhiyun  * Digest is always 16 bytes
2932*4882a593Smuzhiyun  *
2933*4882a593Smuzhiyun  * Return: Value from generic gcm setkey.
2934*4882a593Smuzhiyun  */
2935*4882a593Smuzhiyun static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
2936*4882a593Smuzhiyun 			       const u8 *key, unsigned int keylen)
2937*4882a593Smuzhiyun {
2938*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun 	flow_log("%s\n", __func__);
2941*4882a593Smuzhiyun 
2942*4882a593Smuzhiyun 	if (keylen < GCM_ESP_SALT_SIZE)
2943*4882a593Smuzhiyun 		return -EINVAL;
2944*4882a593Smuzhiyun 
2945*4882a593Smuzhiyun 	ctx->salt_len = GCM_ESP_SALT_SIZE;
2946*4882a593Smuzhiyun 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2947*4882a593Smuzhiyun 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2948*4882a593Smuzhiyun 	keylen -= GCM_ESP_SALT_SIZE;
2949*4882a593Smuzhiyun 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
2950*4882a593Smuzhiyun 	ctx->is_esp = true;
2951*4882a593Smuzhiyun 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2952*4882a593Smuzhiyun 
2953*4882a593Smuzhiyun 	return aead_gcm_ccm_setkey(cipher, key, keylen);
2954*4882a593Smuzhiyun }
2955*4882a593Smuzhiyun 
2956*4882a593Smuzhiyun /**
2957*4882a593Smuzhiyun  * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
2958*4882a593Smuzhiyun  * @cipher: AEAD structure
2959*4882a593Smuzhiyun  * @key:    Key followed by 4 bytes of salt
2960*4882a593Smuzhiyun  * @keylen: Length of key plus salt, in bytes
2961*4882a593Smuzhiyun  *
2962*4882a593Smuzhiyun  * Extracts salt from key and stores it to be prepended to IV on each request.
2963*4882a593Smuzhiyun  * Digest is always 16 bytes
2964*4882a593Smuzhiyun  *
2965*4882a593Smuzhiyun  * Return: Value from generic gcm setkey.
2966*4882a593Smuzhiyun  */
2967*4882a593Smuzhiyun static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
2968*4882a593Smuzhiyun 				  const u8 *key, unsigned int keylen)
2969*4882a593Smuzhiyun {
2970*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2971*4882a593Smuzhiyun 
2972*4882a593Smuzhiyun 	flow_log("%s\n", __func__);
2973*4882a593Smuzhiyun 
2974*4882a593Smuzhiyun 	if (keylen < GCM_ESP_SALT_SIZE)
2975*4882a593Smuzhiyun 		return -EINVAL;
2976*4882a593Smuzhiyun 
2977*4882a593Smuzhiyun 	ctx->salt_len = GCM_ESP_SALT_SIZE;
2978*4882a593Smuzhiyun 	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2979*4882a593Smuzhiyun 	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2980*4882a593Smuzhiyun 	keylen -= GCM_ESP_SALT_SIZE;
2981*4882a593Smuzhiyun 	ctx->digestsize = GCM_ESP_DIGESTSIZE;
2982*4882a593Smuzhiyun 	ctx->is_esp = true;
2983*4882a593Smuzhiyun 	ctx->is_rfc4543 = true;
2984*4882a593Smuzhiyun 	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2985*4882a593Smuzhiyun 
2986*4882a593Smuzhiyun 	return aead_gcm_ccm_setkey(cipher, key, keylen);
2987*4882a593Smuzhiyun }
2988*4882a593Smuzhiyun 
2989*4882a593Smuzhiyun /**
2990*4882a593Smuzhiyun  * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
2991*4882a593Smuzhiyun  * @cipher: AEAD structure
2992*4882a593Smuzhiyun  * @key:    Key followed by 4 bytes of salt
2993*4882a593Smuzhiyun  * @keylen: Length of key plus salt, in bytes
2994*4882a593Smuzhiyun  *
2995*4882a593Smuzhiyun  * Extracts salt from key and stores it to be prepended to IV on each request.
2996*4882a593Smuzhiyun  * Digest is always 16 bytes
2997*4882a593Smuzhiyun  *
2998*4882a593Smuzhiyun  * Return: Value from generic ccm setkey.
2999*4882a593Smuzhiyun  */
3000*4882a593Smuzhiyun static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3001*4882a593Smuzhiyun 			       const u8 *key, unsigned int keylen)
3002*4882a593Smuzhiyun {
3003*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3004*4882a593Smuzhiyun 
3005*4882a593Smuzhiyun 	flow_log("%s\n", __func__);
3006*4882a593Smuzhiyun 
3007*4882a593Smuzhiyun 	if (keylen < CCM_ESP_SALT_SIZE)
3008*4882a593Smuzhiyun 		return -EINVAL;
3009*4882a593Smuzhiyun 
3010*4882a593Smuzhiyun 	ctx->salt_len = CCM_ESP_SALT_SIZE;
3011*4882a593Smuzhiyun 	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3012*4882a593Smuzhiyun 	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3013*4882a593Smuzhiyun 	keylen -= CCM_ESP_SALT_SIZE;
3014*4882a593Smuzhiyun 	ctx->is_esp = true;
3015*4882a593Smuzhiyun 	flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3016*4882a593Smuzhiyun 
3017*4882a593Smuzhiyun 	return aead_gcm_ccm_setkey(cipher, key, keylen);
3018*4882a593Smuzhiyun }
3019*4882a593Smuzhiyun 
3020*4882a593Smuzhiyun static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3021*4882a593Smuzhiyun {
3022*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3023*4882a593Smuzhiyun 	int ret = 0;
3024*4882a593Smuzhiyun 
3025*4882a593Smuzhiyun 	flow_log("%s() authkeylen:%u authsize:%u\n",
3026*4882a593Smuzhiyun 		 __func__, ctx->authkeylen, authsize);
3027*4882a593Smuzhiyun 
3028*4882a593Smuzhiyun 	ctx->digestsize = authsize;
3029*4882a593Smuzhiyun 
3030*4882a593Smuzhiyun 	/* setkey the fallback just in case we need to use it */
3031*4882a593Smuzhiyun 	if (ctx->fallback_cipher) {
3032*4882a593Smuzhiyun 		flow_log("  running fallback setauth()\n");
3033*4882a593Smuzhiyun 
3034*4882a593Smuzhiyun 		ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3035*4882a593Smuzhiyun 		if (ret)
3036*4882a593Smuzhiyun 			flow_log("  fallback setauth() returned:%d\n", ret);
3037*4882a593Smuzhiyun 	}
3038*4882a593Smuzhiyun 
3039*4882a593Smuzhiyun 	return ret;
3040*4882a593Smuzhiyun }
3041*4882a593Smuzhiyun 
3042*4882a593Smuzhiyun static int aead_encrypt(struct aead_request *req)
3043*4882a593Smuzhiyun {
3044*4882a593Smuzhiyun 	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3045*4882a593Smuzhiyun 		 req->cryptlen);
3046*4882a593Smuzhiyun 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3047*4882a593Smuzhiyun 	flow_log("  assoc_len:%u\n", req->assoclen);
3048*4882a593Smuzhiyun 
3049*4882a593Smuzhiyun 	return aead_enqueue(req, true);
3050*4882a593Smuzhiyun }
3051*4882a593Smuzhiyun 
3052*4882a593Smuzhiyun static int aead_decrypt(struct aead_request *req)
3053*4882a593Smuzhiyun {
3054*4882a593Smuzhiyun 	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3055*4882a593Smuzhiyun 	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3056*4882a593Smuzhiyun 	flow_log("  assoc_len:%u\n", req->assoclen);
3057*4882a593Smuzhiyun 
3058*4882a593Smuzhiyun 	return aead_enqueue(req, false);
3059*4882a593Smuzhiyun }
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun /* ==================== Supported Cipher Algorithms ==================== */
3062*4882a593Smuzhiyun 
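/*
 * Once the entries below are registered, kernel users reach this
 * driver through the generic crypto API; the core picks the iproc
 * implementation whenever its priority wins. A hedged usage sketch
 * (get_hw_gcm() is a hypothetical caller, not part of this driver):
 */
static struct crypto_aead *get_hw_gcm(void)
{
	/*
	 * Resolves to "gcm-aes-iproc" when this driver has the highest
	 * priority among registered gcm(aes) implementations; callers
	 * must check the result with IS_ERR().
	 */
	return crypto_alloc_aead("gcm(aes)", 0, 0);
}
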
3063*4882a593Smuzhiyun static struct iproc_alg_s driver_algs[] = {
3064*4882a593Smuzhiyun 	{
3065*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3066*4882a593Smuzhiyun 	 .alg.aead = {
3067*4882a593Smuzhiyun 		 .base = {
3068*4882a593Smuzhiyun 			.cra_name = "gcm(aes)",
3069*4882a593Smuzhiyun 			.cra_driver_name = "gcm-aes-iproc",
3070*4882a593Smuzhiyun 			.cra_blocksize = AES_BLOCK_SIZE,
3071*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3072*4882a593Smuzhiyun 		 },
3073*4882a593Smuzhiyun 		 .setkey = aead_gcm_ccm_setkey,
3074*4882a593Smuzhiyun 		 .ivsize = GCM_AES_IV_SIZE,
3075*4882a593Smuzhiyun 		 .maxauthsize = AES_BLOCK_SIZE,
3076*4882a593Smuzhiyun 	 },
3077*4882a593Smuzhiyun 	 .cipher_info = {
3078*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3079*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_GCM,
3080*4882a593Smuzhiyun 			 },
3081*4882a593Smuzhiyun 	 .auth_info = {
3082*4882a593Smuzhiyun 		       .alg = HASH_ALG_AES,
3083*4882a593Smuzhiyun 		       .mode = HASH_MODE_GCM,
3084*4882a593Smuzhiyun 		       },
3085*4882a593Smuzhiyun 	 .auth_first = 0,
3086*4882a593Smuzhiyun 	 },
3087*4882a593Smuzhiyun 	{
3088*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3089*4882a593Smuzhiyun 	 .alg.aead = {
3090*4882a593Smuzhiyun 		 .base = {
3091*4882a593Smuzhiyun 			.cra_name = "ccm(aes)",
3092*4882a593Smuzhiyun 			.cra_driver_name = "ccm-aes-iproc",
3093*4882a593Smuzhiyun 			.cra_blocksize = AES_BLOCK_SIZE,
3094*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3095*4882a593Smuzhiyun 		 },
3096*4882a593Smuzhiyun 		 .setkey = aead_gcm_ccm_setkey,
3097*4882a593Smuzhiyun 		 .ivsize = CCM_AES_IV_SIZE,
3098*4882a593Smuzhiyun 		 .maxauthsize = AES_BLOCK_SIZE,
3099*4882a593Smuzhiyun 	 },
3100*4882a593Smuzhiyun 	 .cipher_info = {
3101*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3102*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CCM,
3103*4882a593Smuzhiyun 			 },
3104*4882a593Smuzhiyun 	 .auth_info = {
3105*4882a593Smuzhiyun 		       .alg = HASH_ALG_AES,
3106*4882a593Smuzhiyun 		       .mode = HASH_MODE_CCM,
3107*4882a593Smuzhiyun 		       },
3108*4882a593Smuzhiyun 	 .auth_first = 0,
3109*4882a593Smuzhiyun 	 },
3110*4882a593Smuzhiyun 	{
3111*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3112*4882a593Smuzhiyun 	 .alg.aead = {
3113*4882a593Smuzhiyun 		 .base = {
3114*4882a593Smuzhiyun 			.cra_name = "rfc4106(gcm(aes))",
3115*4882a593Smuzhiyun 			.cra_driver_name = "gcm-aes-esp-iproc",
3116*4882a593Smuzhiyun 			.cra_blocksize = AES_BLOCK_SIZE,
3117*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3118*4882a593Smuzhiyun 		 },
3119*4882a593Smuzhiyun 		 .setkey = aead_gcm_esp_setkey,
3120*4882a593Smuzhiyun 		 .ivsize = GCM_RFC4106_IV_SIZE,
3121*4882a593Smuzhiyun 		 .maxauthsize = AES_BLOCK_SIZE,
3122*4882a593Smuzhiyun 	 },
3123*4882a593Smuzhiyun 	 .cipher_info = {
3124*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3125*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_GCM,
3126*4882a593Smuzhiyun 			 },
3127*4882a593Smuzhiyun 	 .auth_info = {
3128*4882a593Smuzhiyun 		       .alg = HASH_ALG_AES,
3129*4882a593Smuzhiyun 		       .mode = HASH_MODE_GCM,
3130*4882a593Smuzhiyun 		       },
3131*4882a593Smuzhiyun 	 .auth_first = 0,
3132*4882a593Smuzhiyun 	 },
3133*4882a593Smuzhiyun 	{
3134*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3135*4882a593Smuzhiyun 	 .alg.aead = {
3136*4882a593Smuzhiyun 		 .base = {
3137*4882a593Smuzhiyun 			.cra_name = "rfc4309(ccm(aes))",
3138*4882a593Smuzhiyun 			.cra_driver_name = "ccm-aes-esp-iproc",
3139*4882a593Smuzhiyun 			.cra_blocksize = AES_BLOCK_SIZE,
3140*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3141*4882a593Smuzhiyun 		 },
3142*4882a593Smuzhiyun 		 .setkey = aead_ccm_esp_setkey,
3143*4882a593Smuzhiyun 		 .ivsize = CCM_AES_IV_SIZE,
3144*4882a593Smuzhiyun 		 .maxauthsize = AES_BLOCK_SIZE,
3145*4882a593Smuzhiyun 	 },
3146*4882a593Smuzhiyun 	 .cipher_info = {
3147*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3148*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CCM,
3149*4882a593Smuzhiyun 			 },
3150*4882a593Smuzhiyun 	 .auth_info = {
3151*4882a593Smuzhiyun 		       .alg = HASH_ALG_AES,
3152*4882a593Smuzhiyun 		       .mode = HASH_MODE_CCM,
3153*4882a593Smuzhiyun 		       },
3154*4882a593Smuzhiyun 	 .auth_first = 0,
3155*4882a593Smuzhiyun 	 },
3156*4882a593Smuzhiyun 	{
3157*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3158*4882a593Smuzhiyun 	 .alg.aead = {
3159*4882a593Smuzhiyun 		 .base = {
3160*4882a593Smuzhiyun 			.cra_name = "rfc4543(gcm(aes))",
3161*4882a593Smuzhiyun 			.cra_driver_name = "gmac-aes-esp-iproc",
3162*4882a593Smuzhiyun 			.cra_blocksize = AES_BLOCK_SIZE,
3163*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
3164*4882a593Smuzhiyun 		 },
3165*4882a593Smuzhiyun 		 .setkey = rfc4543_gcm_esp_setkey,
3166*4882a593Smuzhiyun 		 .ivsize = GCM_RFC4106_IV_SIZE,
3167*4882a593Smuzhiyun 		 .maxauthsize = AES_BLOCK_SIZE,
3168*4882a593Smuzhiyun 	 },
3169*4882a593Smuzhiyun 	 .cipher_info = {
3170*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3171*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_GCM,
3172*4882a593Smuzhiyun 			 },
3173*4882a593Smuzhiyun 	 .auth_info = {
3174*4882a593Smuzhiyun 		       .alg = HASH_ALG_AES,
3175*4882a593Smuzhiyun 		       .mode = HASH_MODE_GCM,
3176*4882a593Smuzhiyun 		       },
3177*4882a593Smuzhiyun 	 .auth_first = 0,
3178*4882a593Smuzhiyun 	 },
3179*4882a593Smuzhiyun 	{
3180*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3181*4882a593Smuzhiyun 	 .alg.aead = {
3182*4882a593Smuzhiyun 		 .base = {
3183*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(md5),cbc(aes))",
3184*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3185*4882a593Smuzhiyun 			.cra_blocksize = AES_BLOCK_SIZE,
3186*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3187*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3188*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3189*4882a593Smuzhiyun 		 },
3190*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3191*4882a593Smuzhiyun 		 .ivsize = AES_BLOCK_SIZE,
3192*4882a593Smuzhiyun 		 .maxauthsize = MD5_DIGEST_SIZE,
3193*4882a593Smuzhiyun 	 },
3194*4882a593Smuzhiyun 	 .cipher_info = {
3195*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3196*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3197*4882a593Smuzhiyun 			 },
3198*4882a593Smuzhiyun 	 .auth_info = {
3199*4882a593Smuzhiyun 		       .alg = HASH_ALG_MD5,
3200*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3201*4882a593Smuzhiyun 		       },
3202*4882a593Smuzhiyun 	 .auth_first = 0,
3203*4882a593Smuzhiyun 	 },
3204*4882a593Smuzhiyun 	{
3205*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3206*4882a593Smuzhiyun 	 .alg.aead = {
3207*4882a593Smuzhiyun 		 .base = {
3208*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
3209*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3210*4882a593Smuzhiyun 			.cra_blocksize = AES_BLOCK_SIZE,
3211*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3212*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3213*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3214*4882a593Smuzhiyun 		 },
3215*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3216*4882a593Smuzhiyun 		 .ivsize = AES_BLOCK_SIZE,
3217*4882a593Smuzhiyun 		 .maxauthsize = SHA1_DIGEST_SIZE,
3218*4882a593Smuzhiyun 	 },
3219*4882a593Smuzhiyun 	 .cipher_info = {
3220*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3221*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3222*4882a593Smuzhiyun 			 },
3223*4882a593Smuzhiyun 	 .auth_info = {
3224*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA1,
3225*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3226*4882a593Smuzhiyun 		       },
3227*4882a593Smuzhiyun 	 .auth_first = 0,
3228*4882a593Smuzhiyun 	 },
3229*4882a593Smuzhiyun 	{
3230*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3231*4882a593Smuzhiyun 	 .alg.aead = {
3232*4882a593Smuzhiyun 		 .base = {
3233*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
3234*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3235*4882a593Smuzhiyun 			.cra_blocksize = AES_BLOCK_SIZE,
3236*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3237*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3238*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3239*4882a593Smuzhiyun 		 },
3240*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3241*4882a593Smuzhiyun 		 .ivsize = AES_BLOCK_SIZE,
3242*4882a593Smuzhiyun 		 .maxauthsize = SHA256_DIGEST_SIZE,
3243*4882a593Smuzhiyun 	 },
3244*4882a593Smuzhiyun 	 .cipher_info = {
3245*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3246*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3247*4882a593Smuzhiyun 			 },
3248*4882a593Smuzhiyun 	 .auth_info = {
3249*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA256,
3250*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3251*4882a593Smuzhiyun 		       },
3252*4882a593Smuzhiyun 	 .auth_first = 0,
3253*4882a593Smuzhiyun 	 },
3254*4882a593Smuzhiyun 	{
3255*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3256*4882a593Smuzhiyun 	 .alg.aead = {
3257*4882a593Smuzhiyun 		 .base = {
3258*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(md5),cbc(des))",
3259*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3260*4882a593Smuzhiyun 			.cra_blocksize = DES_BLOCK_SIZE,
3261*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3262*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3263*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3264*4882a593Smuzhiyun 		 },
3265*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3266*4882a593Smuzhiyun 		 .ivsize = DES_BLOCK_SIZE,
3267*4882a593Smuzhiyun 		 .maxauthsize = MD5_DIGEST_SIZE,
3268*4882a593Smuzhiyun 	 },
3269*4882a593Smuzhiyun 	 .cipher_info = {
3270*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_DES,
3271*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3272*4882a593Smuzhiyun 			 },
3273*4882a593Smuzhiyun 	 .auth_info = {
3274*4882a593Smuzhiyun 		       .alg = HASH_ALG_MD5,
3275*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3276*4882a593Smuzhiyun 		       },
3277*4882a593Smuzhiyun 	 .auth_first = 0,
3278*4882a593Smuzhiyun 	 },
3279*4882a593Smuzhiyun 	{
3280*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3281*4882a593Smuzhiyun 	 .alg.aead = {
3282*4882a593Smuzhiyun 		 .base = {
3283*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha1),cbc(des))",
3284*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3285*4882a593Smuzhiyun 			.cra_blocksize = DES_BLOCK_SIZE,
3286*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3287*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3288*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3289*4882a593Smuzhiyun 		 },
3290*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3291*4882a593Smuzhiyun 		 .ivsize = DES_BLOCK_SIZE,
3292*4882a593Smuzhiyun 		 .maxauthsize = SHA1_DIGEST_SIZE,
3293*4882a593Smuzhiyun 	 },
3294*4882a593Smuzhiyun 	 .cipher_info = {
3295*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_DES,
3296*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3297*4882a593Smuzhiyun 			 },
3298*4882a593Smuzhiyun 	 .auth_info = {
3299*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA1,
3300*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3301*4882a593Smuzhiyun 		       },
3302*4882a593Smuzhiyun 	 .auth_first = 0,
3303*4882a593Smuzhiyun 	 },
3304*4882a593Smuzhiyun 	{
3305*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3306*4882a593Smuzhiyun 	 .alg.aead = {
3307*4882a593Smuzhiyun 		 .base = {
3308*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha224),cbc(des))",
3309*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3310*4882a593Smuzhiyun 			.cra_blocksize = DES_BLOCK_SIZE,
3311*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3312*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3313*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3314*4882a593Smuzhiyun 		 },
3315*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3316*4882a593Smuzhiyun 		 .ivsize = DES_BLOCK_SIZE,
3317*4882a593Smuzhiyun 		 .maxauthsize = SHA224_DIGEST_SIZE,
3318*4882a593Smuzhiyun 	 },
3319*4882a593Smuzhiyun 	 .cipher_info = {
3320*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_DES,
3321*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3322*4882a593Smuzhiyun 			 },
3323*4882a593Smuzhiyun 	 .auth_info = {
3324*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA224,
3325*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3326*4882a593Smuzhiyun 		       },
3327*4882a593Smuzhiyun 	 .auth_first = 0,
3328*4882a593Smuzhiyun 	 },
3329*4882a593Smuzhiyun 	{
3330*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3331*4882a593Smuzhiyun 	 .alg.aead = {
3332*4882a593Smuzhiyun 		 .base = {
3333*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha256),cbc(des))",
3334*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3335*4882a593Smuzhiyun 			.cra_blocksize = DES_BLOCK_SIZE,
3336*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3337*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3338*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3339*4882a593Smuzhiyun 		 },
3340*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3341*4882a593Smuzhiyun 		 .ivsize = DES_BLOCK_SIZE,
3342*4882a593Smuzhiyun 		 .maxauthsize = SHA256_DIGEST_SIZE,
3343*4882a593Smuzhiyun 	 },
3344*4882a593Smuzhiyun 	 .cipher_info = {
3345*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_DES,
3346*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3347*4882a593Smuzhiyun 			 },
3348*4882a593Smuzhiyun 	 .auth_info = {
3349*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA256,
3350*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3351*4882a593Smuzhiyun 		       },
3352*4882a593Smuzhiyun 	 .auth_first = 0,
3353*4882a593Smuzhiyun 	 },
3354*4882a593Smuzhiyun 	{
3355*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3356*4882a593Smuzhiyun 	 .alg.aead = {
3357*4882a593Smuzhiyun 		 .base = {
3358*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha384),cbc(des))",
3359*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3360*4882a593Smuzhiyun 			.cra_blocksize = DES_BLOCK_SIZE,
3361*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3362*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3363*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3364*4882a593Smuzhiyun 		 },
3365*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3366*4882a593Smuzhiyun 		 .ivsize = DES_BLOCK_SIZE,
3367*4882a593Smuzhiyun 		 .maxauthsize = SHA384_DIGEST_SIZE,
3368*4882a593Smuzhiyun 	 },
3369*4882a593Smuzhiyun 	 .cipher_info = {
3370*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_DES,
3371*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3372*4882a593Smuzhiyun 			 },
3373*4882a593Smuzhiyun 	 .auth_info = {
3374*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA384,
3375*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3376*4882a593Smuzhiyun 		       },
3377*4882a593Smuzhiyun 	 .auth_first = 0,
3378*4882a593Smuzhiyun 	 },
3379*4882a593Smuzhiyun 	{
3380*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3381*4882a593Smuzhiyun 	 .alg.aead = {
3382*4882a593Smuzhiyun 		 .base = {
3383*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha512),cbc(des))",
3384*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3385*4882a593Smuzhiyun 			.cra_blocksize = DES_BLOCK_SIZE,
3386*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3387*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3388*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3389*4882a593Smuzhiyun 		 },
3390*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3391*4882a593Smuzhiyun 		 .ivsize = DES_BLOCK_SIZE,
3392*4882a593Smuzhiyun 		 .maxauthsize = SHA512_DIGEST_SIZE,
3393*4882a593Smuzhiyun 	 },
3394*4882a593Smuzhiyun 	 .cipher_info = {
3395*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_DES,
3396*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3397*4882a593Smuzhiyun 			 },
3398*4882a593Smuzhiyun 	 .auth_info = {
3399*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA512,
3400*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3401*4882a593Smuzhiyun 		       },
3402*4882a593Smuzhiyun 	 .auth_first = 0,
3403*4882a593Smuzhiyun 	 },
3404*4882a593Smuzhiyun 	{
3405*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3406*4882a593Smuzhiyun 	 .alg.aead = {
3407*4882a593Smuzhiyun 		 .base = {
3408*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3409*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3410*4882a593Smuzhiyun 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3411*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3412*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3413*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3414*4882a593Smuzhiyun 		 },
3415*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3416*4882a593Smuzhiyun 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3417*4882a593Smuzhiyun 		 .maxauthsize = MD5_DIGEST_SIZE,
3418*4882a593Smuzhiyun 	 },
3419*4882a593Smuzhiyun 	 .cipher_info = {
3420*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_3DES,
3421*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3422*4882a593Smuzhiyun 			 },
3423*4882a593Smuzhiyun 	 .auth_info = {
3424*4882a593Smuzhiyun 		       .alg = HASH_ALG_MD5,
3425*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3426*4882a593Smuzhiyun 		       },
3427*4882a593Smuzhiyun 	 .auth_first = 0,
3428*4882a593Smuzhiyun 	 },
3429*4882a593Smuzhiyun 	{
3430*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3431*4882a593Smuzhiyun 	 .alg.aead = {
3432*4882a593Smuzhiyun 		 .base = {
3433*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3434*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3435*4882a593Smuzhiyun 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3436*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3437*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3438*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3439*4882a593Smuzhiyun 		 },
3440*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3441*4882a593Smuzhiyun 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3442*4882a593Smuzhiyun 		 .maxauthsize = SHA1_DIGEST_SIZE,
3443*4882a593Smuzhiyun 	 },
3444*4882a593Smuzhiyun 	 .cipher_info = {
3445*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_3DES,
3446*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3447*4882a593Smuzhiyun 			 },
3448*4882a593Smuzhiyun 	 .auth_info = {
3449*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA1,
3450*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3451*4882a593Smuzhiyun 		       },
3452*4882a593Smuzhiyun 	 .auth_first = 0,
3453*4882a593Smuzhiyun 	 },
3454*4882a593Smuzhiyun 	{
3455*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3456*4882a593Smuzhiyun 	 .alg.aead = {
3457*4882a593Smuzhiyun 		 .base = {
3458*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3459*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3460*4882a593Smuzhiyun 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3461*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3462*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3463*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3464*4882a593Smuzhiyun 		 },
3465*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3466*4882a593Smuzhiyun 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3467*4882a593Smuzhiyun 		 .maxauthsize = SHA224_DIGEST_SIZE,
3468*4882a593Smuzhiyun 	 },
3469*4882a593Smuzhiyun 	 .cipher_info = {
3470*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_3DES,
3471*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3472*4882a593Smuzhiyun 			 },
3473*4882a593Smuzhiyun 	 .auth_info = {
3474*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA224,
3475*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3476*4882a593Smuzhiyun 		       },
3477*4882a593Smuzhiyun 	 .auth_first = 0,
3478*4882a593Smuzhiyun 	 },
3479*4882a593Smuzhiyun 	{
3480*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3481*4882a593Smuzhiyun 	 .alg.aead = {
3482*4882a593Smuzhiyun 		 .base = {
3483*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3484*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3485*4882a593Smuzhiyun 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3486*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3487*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3488*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3489*4882a593Smuzhiyun 		 },
3490*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3491*4882a593Smuzhiyun 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3492*4882a593Smuzhiyun 		 .maxauthsize = SHA256_DIGEST_SIZE,
3493*4882a593Smuzhiyun 	 },
3494*4882a593Smuzhiyun 	 .cipher_info = {
3495*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_3DES,
3496*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3497*4882a593Smuzhiyun 			 },
3498*4882a593Smuzhiyun 	 .auth_info = {
3499*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA256,
3500*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3501*4882a593Smuzhiyun 		       },
3502*4882a593Smuzhiyun 	 .auth_first = 0,
3503*4882a593Smuzhiyun 	 },
3504*4882a593Smuzhiyun 	{
3505*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3506*4882a593Smuzhiyun 	 .alg.aead = {
3507*4882a593Smuzhiyun 		 .base = {
3508*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3509*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3510*4882a593Smuzhiyun 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3511*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3512*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3513*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3514*4882a593Smuzhiyun 		 },
3515*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3516*4882a593Smuzhiyun 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3517*4882a593Smuzhiyun 		 .maxauthsize = SHA384_DIGEST_SIZE,
3518*4882a593Smuzhiyun 	 },
3519*4882a593Smuzhiyun 	 .cipher_info = {
3520*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_3DES,
3521*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3522*4882a593Smuzhiyun 			 },
3523*4882a593Smuzhiyun 	 .auth_info = {
3524*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA384,
3525*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3526*4882a593Smuzhiyun 		       },
3527*4882a593Smuzhiyun 	 .auth_first = 0,
3528*4882a593Smuzhiyun 	 },
3529*4882a593Smuzhiyun 	{
3530*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AEAD,
3531*4882a593Smuzhiyun 	 .alg.aead = {
3532*4882a593Smuzhiyun 		 .base = {
3533*4882a593Smuzhiyun 			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3534*4882a593Smuzhiyun 			.cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3535*4882a593Smuzhiyun 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3536*4882a593Smuzhiyun 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3537*4882a593Smuzhiyun 				     CRYPTO_ALG_ASYNC |
3538*4882a593Smuzhiyun 				     CRYPTO_ALG_ALLOCATES_MEMORY
3539*4882a593Smuzhiyun 		 },
3540*4882a593Smuzhiyun 		 .setkey = aead_authenc_setkey,
3541*4882a593Smuzhiyun 		 .ivsize = DES3_EDE_BLOCK_SIZE,
3542*4882a593Smuzhiyun 		 .maxauthsize = SHA512_DIGEST_SIZE,
3543*4882a593Smuzhiyun 	 },
3544*4882a593Smuzhiyun 	 .cipher_info = {
3545*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_3DES,
3546*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3547*4882a593Smuzhiyun 			 },
3548*4882a593Smuzhiyun 	 .auth_info = {
3549*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA512,
3550*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3551*4882a593Smuzhiyun 		       },
3552*4882a593Smuzhiyun 	 .auth_first = 0,
3553*4882a593Smuzhiyun 	 },
3554*4882a593Smuzhiyun 
3555*4882a593Smuzhiyun /* SKCIPHER algorithms. */
3556*4882a593Smuzhiyun 	{
3557*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3558*4882a593Smuzhiyun 	 .alg.skcipher = {
3559*4882a593Smuzhiyun 			.base.cra_name = "ofb(des)",
3560*4882a593Smuzhiyun 			.base.cra_driver_name = "ofb-des-iproc",
3561*4882a593Smuzhiyun 			.base.cra_blocksize = DES_BLOCK_SIZE,
3562*4882a593Smuzhiyun 			.min_keysize = DES_KEY_SIZE,
3563*4882a593Smuzhiyun 			.max_keysize = DES_KEY_SIZE,
3564*4882a593Smuzhiyun 			.ivsize = DES_BLOCK_SIZE,
3565*4882a593Smuzhiyun 			},
3566*4882a593Smuzhiyun 	 .cipher_info = {
3567*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_DES,
3568*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_OFB,
3569*4882a593Smuzhiyun 			 },
3570*4882a593Smuzhiyun 	 .auth_info = {
3571*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3572*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3573*4882a593Smuzhiyun 		       },
3574*4882a593Smuzhiyun 	 },
3575*4882a593Smuzhiyun 	{
3576*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3577*4882a593Smuzhiyun 	 .alg.skcipher = {
3578*4882a593Smuzhiyun 			.base.cra_name = "cbc(des)",
3579*4882a593Smuzhiyun 			.base.cra_driver_name = "cbc-des-iproc",
3580*4882a593Smuzhiyun 			.base.cra_blocksize = DES_BLOCK_SIZE,
3581*4882a593Smuzhiyun 			.min_keysize = DES_KEY_SIZE,
3582*4882a593Smuzhiyun 			.max_keysize = DES_KEY_SIZE,
3583*4882a593Smuzhiyun 			.ivsize = DES_BLOCK_SIZE,
3584*4882a593Smuzhiyun 			},
3585*4882a593Smuzhiyun 	 .cipher_info = {
3586*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_DES,
3587*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3588*4882a593Smuzhiyun 			 },
3589*4882a593Smuzhiyun 	 .auth_info = {
3590*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3591*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3592*4882a593Smuzhiyun 		       },
3593*4882a593Smuzhiyun 	 },
3594*4882a593Smuzhiyun 	{
3595*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3596*4882a593Smuzhiyun 	 .alg.skcipher = {
3597*4882a593Smuzhiyun 			.base.cra_name = "ecb(des)",
3598*4882a593Smuzhiyun 			.base.cra_driver_name = "ecb-des-iproc",
3599*4882a593Smuzhiyun 			.base.cra_blocksize = DES_BLOCK_SIZE,
3600*4882a593Smuzhiyun 			.min_keysize = DES_KEY_SIZE,
3601*4882a593Smuzhiyun 			.max_keysize = DES_KEY_SIZE,
3602*4882a593Smuzhiyun 			.ivsize = 0,
3603*4882a593Smuzhiyun 			},
3604*4882a593Smuzhiyun 	 .cipher_info = {
3605*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_DES,
3606*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_ECB,
3607*4882a593Smuzhiyun 			 },
3608*4882a593Smuzhiyun 	 .auth_info = {
3609*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3610*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3611*4882a593Smuzhiyun 		       },
3612*4882a593Smuzhiyun 	 },
3613*4882a593Smuzhiyun 	{
3614*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3615*4882a593Smuzhiyun 	 .alg.skcipher = {
3616*4882a593Smuzhiyun 			.base.cra_name = "ofb(des3_ede)",
3617*4882a593Smuzhiyun 			.base.cra_driver_name = "ofb-des3-iproc",
3618*4882a593Smuzhiyun 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3619*4882a593Smuzhiyun 			.min_keysize = DES3_EDE_KEY_SIZE,
3620*4882a593Smuzhiyun 			.max_keysize = DES3_EDE_KEY_SIZE,
3621*4882a593Smuzhiyun 			.ivsize = DES3_EDE_BLOCK_SIZE,
3622*4882a593Smuzhiyun 			},
3623*4882a593Smuzhiyun 	 .cipher_info = {
3624*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_3DES,
3625*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_OFB,
3626*4882a593Smuzhiyun 			 },
3627*4882a593Smuzhiyun 	 .auth_info = {
3628*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3629*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3630*4882a593Smuzhiyun 		       },
3631*4882a593Smuzhiyun 	 },
3632*4882a593Smuzhiyun 	{
3633*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3634*4882a593Smuzhiyun 	 .alg.skcipher = {
3635*4882a593Smuzhiyun 			.base.cra_name = "cbc(des3_ede)",
3636*4882a593Smuzhiyun 			.base.cra_driver_name = "cbc-des3-iproc",
3637*4882a593Smuzhiyun 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3638*4882a593Smuzhiyun 			.min_keysize = DES3_EDE_KEY_SIZE,
3639*4882a593Smuzhiyun 			.max_keysize = DES3_EDE_KEY_SIZE,
3640*4882a593Smuzhiyun 			.ivsize = DES3_EDE_BLOCK_SIZE,
3641*4882a593Smuzhiyun 			},
3642*4882a593Smuzhiyun 	 .cipher_info = {
3643*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_3DES,
3644*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3645*4882a593Smuzhiyun 			 },
3646*4882a593Smuzhiyun 	 .auth_info = {
3647*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3648*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3649*4882a593Smuzhiyun 		       },
3650*4882a593Smuzhiyun 	 },
3651*4882a593Smuzhiyun 	{
3652*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3653*4882a593Smuzhiyun 	 .alg.skcipher = {
3654*4882a593Smuzhiyun 			.base.cra_name = "ecb(des3_ede)",
3655*4882a593Smuzhiyun 			.base.cra_driver_name = "ecb-des3-iproc",
3656*4882a593Smuzhiyun 			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3657*4882a593Smuzhiyun 			.min_keysize = DES3_EDE_KEY_SIZE,
3658*4882a593Smuzhiyun 			.max_keysize = DES3_EDE_KEY_SIZE,
3659*4882a593Smuzhiyun 			.ivsize = 0,
3660*4882a593Smuzhiyun 			},
3661*4882a593Smuzhiyun 	 .cipher_info = {
3662*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_3DES,
3663*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_ECB,
3664*4882a593Smuzhiyun 			 },
3665*4882a593Smuzhiyun 	 .auth_info = {
3666*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3667*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3668*4882a593Smuzhiyun 		       },
3669*4882a593Smuzhiyun 	 },
3670*4882a593Smuzhiyun 	{
3671*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3672*4882a593Smuzhiyun 	 .alg.skcipher = {
3673*4882a593Smuzhiyun 			.base.cra_name = "ofb(aes)",
3674*4882a593Smuzhiyun 			.base.cra_driver_name = "ofb-aes-iproc",
3675*4882a593Smuzhiyun 			.base.cra_blocksize = AES_BLOCK_SIZE,
3676*4882a593Smuzhiyun 			.min_keysize = AES_MIN_KEY_SIZE,
3677*4882a593Smuzhiyun 			.max_keysize = AES_MAX_KEY_SIZE,
3678*4882a593Smuzhiyun 			.ivsize = AES_BLOCK_SIZE,
3679*4882a593Smuzhiyun 			},
3680*4882a593Smuzhiyun 	 .cipher_info = {
3681*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3682*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_OFB,
3683*4882a593Smuzhiyun 			 },
3684*4882a593Smuzhiyun 	 .auth_info = {
3685*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3686*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3687*4882a593Smuzhiyun 		       },
3688*4882a593Smuzhiyun 	 },
3689*4882a593Smuzhiyun 	{
3690*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3691*4882a593Smuzhiyun 	 .alg.skcipher = {
3692*4882a593Smuzhiyun 			.base.cra_name = "cbc(aes)",
3693*4882a593Smuzhiyun 			.base.cra_driver_name = "cbc-aes-iproc",
3694*4882a593Smuzhiyun 			.base.cra_blocksize = AES_BLOCK_SIZE,
3695*4882a593Smuzhiyun 			.min_keysize = AES_MIN_KEY_SIZE,
3696*4882a593Smuzhiyun 			.max_keysize = AES_MAX_KEY_SIZE,
3697*4882a593Smuzhiyun 			.ivsize = AES_BLOCK_SIZE,
3698*4882a593Smuzhiyun 			},
3699*4882a593Smuzhiyun 	 .cipher_info = {
3700*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3701*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CBC,
3702*4882a593Smuzhiyun 			 },
3703*4882a593Smuzhiyun 	 .auth_info = {
3704*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3705*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3706*4882a593Smuzhiyun 		       },
3707*4882a593Smuzhiyun 	 },
3708*4882a593Smuzhiyun 	{
3709*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3710*4882a593Smuzhiyun 	 .alg.skcipher = {
3711*4882a593Smuzhiyun 			.base.cra_name = "ecb(aes)",
3712*4882a593Smuzhiyun 			.base.cra_driver_name = "ecb-aes-iproc",
3713*4882a593Smuzhiyun 			.base.cra_blocksize = AES_BLOCK_SIZE,
3714*4882a593Smuzhiyun 			.min_keysize = AES_MIN_KEY_SIZE,
3715*4882a593Smuzhiyun 			.max_keysize = AES_MAX_KEY_SIZE,
3716*4882a593Smuzhiyun 			.ivsize = 0,
3717*4882a593Smuzhiyun 			},
3718*4882a593Smuzhiyun 	 .cipher_info = {
3719*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3720*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_ECB,
3721*4882a593Smuzhiyun 			 },
3722*4882a593Smuzhiyun 	 .auth_info = {
3723*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3724*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3725*4882a593Smuzhiyun 		       },
3726*4882a593Smuzhiyun 	 },
3727*4882a593Smuzhiyun 	{
3728*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3729*4882a593Smuzhiyun 	 .alg.skcipher = {
3730*4882a593Smuzhiyun 			.base.cra_name = "ctr(aes)",
3731*4882a593Smuzhiyun 			.base.cra_driver_name = "ctr-aes-iproc",
3732*4882a593Smuzhiyun 			.base.cra_blocksize = AES_BLOCK_SIZE,
3733*4882a593Smuzhiyun 			.min_keysize = AES_MIN_KEY_SIZE,
3734*4882a593Smuzhiyun 			.max_keysize = AES_MAX_KEY_SIZE,
3735*4882a593Smuzhiyun 			.ivsize = AES_BLOCK_SIZE,
3736*4882a593Smuzhiyun 			},
3737*4882a593Smuzhiyun 	 .cipher_info = {
3738*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3739*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_CTR,
3740*4882a593Smuzhiyun 			 },
3741*4882a593Smuzhiyun 	 .auth_info = {
3742*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3743*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3744*4882a593Smuzhiyun 		       },
3745*4882a593Smuzhiyun 	 },
3746*4882a593Smuzhiyun 	{
3747*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3748*4882a593Smuzhiyun 	 .alg.skcipher = {
3749*4882a593Smuzhiyun 			.base.cra_name = "xts(aes)",
3750*4882a593Smuzhiyun 			.base.cra_driver_name = "xts-aes-iproc",
3751*4882a593Smuzhiyun 			.base.cra_blocksize = AES_BLOCK_SIZE,
3752*4882a593Smuzhiyun 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
3753*4882a593Smuzhiyun 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
3754*4882a593Smuzhiyun 			.ivsize = AES_BLOCK_SIZE,
3755*4882a593Smuzhiyun 			},
3756*4882a593Smuzhiyun 	 .cipher_info = {
3757*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_AES,
3758*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_XTS,
3759*4882a593Smuzhiyun 			 },
3760*4882a593Smuzhiyun 	 .auth_info = {
3761*4882a593Smuzhiyun 		       .alg = HASH_ALG_NONE,
3762*4882a593Smuzhiyun 		       .mode = HASH_MODE_NONE,
3763*4882a593Smuzhiyun 		       },
3764*4882a593Smuzhiyun 	 },
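	/*
	 * XTS takes two AES keys of equal length (a data key and a tweak
	 * key) concatenated into one, which is why the min/max key sizes
	 * above are twice the plain AES key sizes.
	 */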
3765*4882a593Smuzhiyun 
3766*4882a593Smuzhiyun /* AHASH algorithms. */
3767*4882a593Smuzhiyun 	{
3768*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
3769*4882a593Smuzhiyun 	 .alg.hash = {
3770*4882a593Smuzhiyun 		      .halg.digestsize = MD5_DIGEST_SIZE,
3771*4882a593Smuzhiyun 		      .halg.base = {
3772*4882a593Smuzhiyun 				    .cra_name = "md5",
3773*4882a593Smuzhiyun 				    .cra_driver_name = "md5-iproc",
3774*4882a593Smuzhiyun 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3775*4882a593Smuzhiyun 				    .cra_flags = CRYPTO_ALG_ASYNC |
3776*4882a593Smuzhiyun 						 CRYPTO_ALG_ALLOCATES_MEMORY,
3777*4882a593Smuzhiyun 				}
3778*4882a593Smuzhiyun 		      },
3779*4882a593Smuzhiyun 	 .cipher_info = {
3780*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3781*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3782*4882a593Smuzhiyun 			 },
3783*4882a593Smuzhiyun 	 .auth_info = {
3784*4882a593Smuzhiyun 		       .alg = HASH_ALG_MD5,
3785*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
3786*4882a593Smuzhiyun 		       },
3787*4882a593Smuzhiyun 	 },
3788*4882a593Smuzhiyun 	{
3789*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
3790*4882a593Smuzhiyun 	 .alg.hash = {
3791*4882a593Smuzhiyun 		      .halg.digestsize = MD5_DIGEST_SIZE,
3792*4882a593Smuzhiyun 		      .halg.base = {
3793*4882a593Smuzhiyun 				    .cra_name = "hmac(md5)",
3794*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-md5-iproc",
3795*4882a593Smuzhiyun 				    .cra_blocksize = MD5_BLOCK_WORDS * 4,
3796*4882a593Smuzhiyun 				}
3797*4882a593Smuzhiyun 		      },
3798*4882a593Smuzhiyun 	 .cipher_info = {
3799*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3800*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3801*4882a593Smuzhiyun 			 },
3802*4882a593Smuzhiyun 	 .auth_info = {
3803*4882a593Smuzhiyun 		       .alg = HASH_ALG_MD5,
3804*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3805*4882a593Smuzhiyun 		       },
3806*4882a593Smuzhiyun 	 },
3807*4882a593Smuzhiyun 	{.type = CRYPTO_ALG_TYPE_AHASH,
3808*4882a593Smuzhiyun 	 .alg.hash = {
3809*4882a593Smuzhiyun 		      .halg.digestsize = SHA1_DIGEST_SIZE,
3810*4882a593Smuzhiyun 		      .halg.base = {
3811*4882a593Smuzhiyun 				    .cra_name = "sha1",
3812*4882a593Smuzhiyun 				    .cra_driver_name = "sha1-iproc",
3813*4882a593Smuzhiyun 				    .cra_blocksize = SHA1_BLOCK_SIZE,
3814*4882a593Smuzhiyun 				}
3815*4882a593Smuzhiyun 		      },
3816*4882a593Smuzhiyun 	 .cipher_info = {
3817*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3818*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3819*4882a593Smuzhiyun 			 },
3820*4882a593Smuzhiyun 	 .auth_info = {
3821*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA1,
3822*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
3823*4882a593Smuzhiyun 		       },
3824*4882a593Smuzhiyun 	 },
3825*4882a593Smuzhiyun 	{.type = CRYPTO_ALG_TYPE_AHASH,
3826*4882a593Smuzhiyun 	 .alg.hash = {
3827*4882a593Smuzhiyun 		      .halg.digestsize = SHA1_DIGEST_SIZE,
3828*4882a593Smuzhiyun 		      .halg.base = {
3829*4882a593Smuzhiyun 				    .cra_name = "hmac(sha1)",
3830*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-sha1-iproc",
3831*4882a593Smuzhiyun 				    .cra_blocksize = SHA1_BLOCK_SIZE,
3832*4882a593Smuzhiyun 				}
3833*4882a593Smuzhiyun 		      },
3834*4882a593Smuzhiyun 	 .cipher_info = {
3835*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3836*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3837*4882a593Smuzhiyun 			 },
3838*4882a593Smuzhiyun 	 .auth_info = {
3839*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA1,
3840*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3841*4882a593Smuzhiyun 		       },
3842*4882a593Smuzhiyun 	 },
3843*4882a593Smuzhiyun 	{.type = CRYPTO_ALG_TYPE_AHASH,
3844*4882a593Smuzhiyun 	 .alg.hash = {
3845*4882a593Smuzhiyun 			.halg.digestsize = SHA224_DIGEST_SIZE,
3846*4882a593Smuzhiyun 			.halg.base = {
3847*4882a593Smuzhiyun 				    .cra_name = "sha224",
3848*4882a593Smuzhiyun 				    .cra_driver_name = "sha224-iproc",
3849*4882a593Smuzhiyun 				    .cra_blocksize = SHA224_BLOCK_SIZE,
3850*4882a593Smuzhiyun 			}
3851*4882a593Smuzhiyun 		      },
3852*4882a593Smuzhiyun 	 .cipher_info = {
3853*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3854*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3855*4882a593Smuzhiyun 			 },
3856*4882a593Smuzhiyun 	 .auth_info = {
3857*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA224,
3858*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
3859*4882a593Smuzhiyun 		       },
3860*4882a593Smuzhiyun 	 },
3861*4882a593Smuzhiyun 	{.type = CRYPTO_ALG_TYPE_AHASH,
3862*4882a593Smuzhiyun 	 .alg.hash = {
3863*4882a593Smuzhiyun 		      .halg.digestsize = SHA224_DIGEST_SIZE,
3864*4882a593Smuzhiyun 		      .halg.base = {
3865*4882a593Smuzhiyun 				    .cra_name = "hmac(sha224)",
3866*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-sha224-iproc",
3867*4882a593Smuzhiyun 				    .cra_blocksize = SHA224_BLOCK_SIZE,
3868*4882a593Smuzhiyun 				}
3869*4882a593Smuzhiyun 		      },
3870*4882a593Smuzhiyun 	 .cipher_info = {
3871*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3872*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3873*4882a593Smuzhiyun 			 },
3874*4882a593Smuzhiyun 	 .auth_info = {
3875*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA224,
3876*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3877*4882a593Smuzhiyun 		       },
3878*4882a593Smuzhiyun 	 },
3879*4882a593Smuzhiyun 	{.type = CRYPTO_ALG_TYPE_AHASH,
3880*4882a593Smuzhiyun 	 .alg.hash = {
3881*4882a593Smuzhiyun 		      .halg.digestsize = SHA256_DIGEST_SIZE,
3882*4882a593Smuzhiyun 		      .halg.base = {
3883*4882a593Smuzhiyun 				    .cra_name = "sha256",
3884*4882a593Smuzhiyun 				    .cra_driver_name = "sha256-iproc",
3885*4882a593Smuzhiyun 				    .cra_blocksize = SHA256_BLOCK_SIZE,
3886*4882a593Smuzhiyun 				}
3887*4882a593Smuzhiyun 		      },
3888*4882a593Smuzhiyun 	 .cipher_info = {
3889*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3890*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3891*4882a593Smuzhiyun 			 },
3892*4882a593Smuzhiyun 	 .auth_info = {
3893*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA256,
3894*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
3895*4882a593Smuzhiyun 		       },
3896*4882a593Smuzhiyun 	 },
3897*4882a593Smuzhiyun 	{.type = CRYPTO_ALG_TYPE_AHASH,
3898*4882a593Smuzhiyun 	 .alg.hash = {
3899*4882a593Smuzhiyun 		      .halg.digestsize = SHA256_DIGEST_SIZE,
3900*4882a593Smuzhiyun 		      .halg.base = {
3901*4882a593Smuzhiyun 				    .cra_name = "hmac(sha256)",
3902*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-sha256-iproc",
3903*4882a593Smuzhiyun 				    .cra_blocksize = SHA256_BLOCK_SIZE,
3904*4882a593Smuzhiyun 				}
3905*4882a593Smuzhiyun 		      },
3906*4882a593Smuzhiyun 	 .cipher_info = {
3907*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3908*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3909*4882a593Smuzhiyun 			 },
3910*4882a593Smuzhiyun 	 .auth_info = {
3911*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA256,
3912*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3913*4882a593Smuzhiyun 		       },
3914*4882a593Smuzhiyun 	 },
3915*4882a593Smuzhiyun 	{
3916*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
3917*4882a593Smuzhiyun 	 .alg.hash = {
3918*4882a593Smuzhiyun 		      .halg.digestsize = SHA384_DIGEST_SIZE,
3919*4882a593Smuzhiyun 		      .halg.base = {
3920*4882a593Smuzhiyun 				    .cra_name = "sha384",
3921*4882a593Smuzhiyun 				    .cra_driver_name = "sha384-iproc",
3922*4882a593Smuzhiyun 				    .cra_blocksize = SHA384_BLOCK_SIZE,
3923*4882a593Smuzhiyun 				}
3924*4882a593Smuzhiyun 		      },
3925*4882a593Smuzhiyun 	 .cipher_info = {
3926*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3927*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3928*4882a593Smuzhiyun 			 },
3929*4882a593Smuzhiyun 	 .auth_info = {
3930*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA384,
3931*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
3932*4882a593Smuzhiyun 		       },
3933*4882a593Smuzhiyun 	 },
3934*4882a593Smuzhiyun 	{
3935*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
3936*4882a593Smuzhiyun 	 .alg.hash = {
3937*4882a593Smuzhiyun 		      .halg.digestsize = SHA384_DIGEST_SIZE,
3938*4882a593Smuzhiyun 		      .halg.base = {
3939*4882a593Smuzhiyun 				    .cra_name = "hmac(sha384)",
3940*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-sha384-iproc",
3941*4882a593Smuzhiyun 				    .cra_blocksize = SHA384_BLOCK_SIZE,
3942*4882a593Smuzhiyun 				}
3943*4882a593Smuzhiyun 		      },
3944*4882a593Smuzhiyun 	 .cipher_info = {
3945*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3946*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3947*4882a593Smuzhiyun 			 },
3948*4882a593Smuzhiyun 	 .auth_info = {
3949*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA384,
3950*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3951*4882a593Smuzhiyun 		       },
3952*4882a593Smuzhiyun 	 },
3953*4882a593Smuzhiyun 	{
3954*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
3955*4882a593Smuzhiyun 	 .alg.hash = {
3956*4882a593Smuzhiyun 		      .halg.digestsize = SHA512_DIGEST_SIZE,
3957*4882a593Smuzhiyun 		      .halg.base = {
3958*4882a593Smuzhiyun 				    .cra_name = "sha512",
3959*4882a593Smuzhiyun 				    .cra_driver_name = "sha512-iproc",
3960*4882a593Smuzhiyun 				    .cra_blocksize = SHA512_BLOCK_SIZE,
3961*4882a593Smuzhiyun 				}
3962*4882a593Smuzhiyun 		      },
3963*4882a593Smuzhiyun 	 .cipher_info = {
3964*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3965*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3966*4882a593Smuzhiyun 			 },
3967*4882a593Smuzhiyun 	 .auth_info = {
3968*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA512,
3969*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
3970*4882a593Smuzhiyun 		       },
3971*4882a593Smuzhiyun 	 },
3972*4882a593Smuzhiyun 	{
3973*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
3974*4882a593Smuzhiyun 	 .alg.hash = {
3975*4882a593Smuzhiyun 		      .halg.digestsize = SHA512_DIGEST_SIZE,
3976*4882a593Smuzhiyun 		      .halg.base = {
3977*4882a593Smuzhiyun 				    .cra_name = "hmac(sha512)",
3978*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-sha512-iproc",
3979*4882a593Smuzhiyun 				    .cra_blocksize = SHA512_BLOCK_SIZE,
3980*4882a593Smuzhiyun 				}
3981*4882a593Smuzhiyun 		      },
3982*4882a593Smuzhiyun 	 .cipher_info = {
3983*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
3984*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
3985*4882a593Smuzhiyun 			 },
3986*4882a593Smuzhiyun 	 .auth_info = {
3987*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA512,
3988*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
3989*4882a593Smuzhiyun 		       },
3990*4882a593Smuzhiyun 	 },
3991*4882a593Smuzhiyun 	{
3992*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
3993*4882a593Smuzhiyun 	 .alg.hash = {
3994*4882a593Smuzhiyun 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
3995*4882a593Smuzhiyun 		      .halg.base = {
3996*4882a593Smuzhiyun 				    .cra_name = "sha3-224",
3997*4882a593Smuzhiyun 				    .cra_driver_name = "sha3-224-iproc",
3998*4882a593Smuzhiyun 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
3999*4882a593Smuzhiyun 				}
4000*4882a593Smuzhiyun 		      },
4001*4882a593Smuzhiyun 	 .cipher_info = {
4002*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4003*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4004*4882a593Smuzhiyun 			 },
4005*4882a593Smuzhiyun 	 .auth_info = {
4006*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA3_224,
4007*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
4008*4882a593Smuzhiyun 		       },
4009*4882a593Smuzhiyun 	 },
4010*4882a593Smuzhiyun 	{
4011*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
4012*4882a593Smuzhiyun 	 .alg.hash = {
4013*4882a593Smuzhiyun 		      .halg.digestsize = SHA3_224_DIGEST_SIZE,
4014*4882a593Smuzhiyun 		      .halg.base = {
4015*4882a593Smuzhiyun 				    .cra_name = "hmac(sha3-224)",
4016*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-sha3-224-iproc",
4017*4882a593Smuzhiyun 				    .cra_blocksize = SHA3_224_BLOCK_SIZE,
4018*4882a593Smuzhiyun 				}
4019*4882a593Smuzhiyun 		      },
4020*4882a593Smuzhiyun 	 .cipher_info = {
4021*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4022*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4023*4882a593Smuzhiyun 			 },
4024*4882a593Smuzhiyun 	 .auth_info = {
4025*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA3_224,
4026*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
4027*4882a593Smuzhiyun 		       },
4028*4882a593Smuzhiyun 	 },
4029*4882a593Smuzhiyun 	{
4030*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
4031*4882a593Smuzhiyun 	 .alg.hash = {
4032*4882a593Smuzhiyun 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4033*4882a593Smuzhiyun 		      .halg.base = {
4034*4882a593Smuzhiyun 				    .cra_name = "sha3-256",
4035*4882a593Smuzhiyun 				    .cra_driver_name = "sha3-256-iproc",
4036*4882a593Smuzhiyun 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4037*4882a593Smuzhiyun 				}
4038*4882a593Smuzhiyun 		      },
4039*4882a593Smuzhiyun 	 .cipher_info = {
4040*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4041*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4042*4882a593Smuzhiyun 			 },
4043*4882a593Smuzhiyun 	 .auth_info = {
4044*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA3_256,
4045*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
4046*4882a593Smuzhiyun 		       },
4047*4882a593Smuzhiyun 	 },
4048*4882a593Smuzhiyun 	{
4049*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
4050*4882a593Smuzhiyun 	 .alg.hash = {
4051*4882a593Smuzhiyun 		      .halg.digestsize = SHA3_256_DIGEST_SIZE,
4052*4882a593Smuzhiyun 		      .halg.base = {
4053*4882a593Smuzhiyun 				    .cra_name = "hmac(sha3-256)",
4054*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-sha3-256-iproc",
4055*4882a593Smuzhiyun 				    .cra_blocksize = SHA3_256_BLOCK_SIZE,
4056*4882a593Smuzhiyun 				}
4057*4882a593Smuzhiyun 		      },
4058*4882a593Smuzhiyun 	 .cipher_info = {
4059*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4060*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4061*4882a593Smuzhiyun 			 },
4062*4882a593Smuzhiyun 	 .auth_info = {
4063*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA3_256,
4064*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
4065*4882a593Smuzhiyun 		       },
4066*4882a593Smuzhiyun 	 },
4067*4882a593Smuzhiyun 	{
4068*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
4069*4882a593Smuzhiyun 	 .alg.hash = {
4070*4882a593Smuzhiyun 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4071*4882a593Smuzhiyun 		      .halg.base = {
4072*4882a593Smuzhiyun 				    .cra_name = "sha3-384",
4073*4882a593Smuzhiyun 				    .cra_driver_name = "sha3-384-iproc",
4074*4882a593Smuzhiyun 				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4075*4882a593Smuzhiyun 				}
4076*4882a593Smuzhiyun 		      },
4077*4882a593Smuzhiyun 	 .cipher_info = {
4078*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4079*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4080*4882a593Smuzhiyun 			 },
4081*4882a593Smuzhiyun 	 .auth_info = {
4082*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA3_384,
4083*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
4084*4882a593Smuzhiyun 		       },
4085*4882a593Smuzhiyun 	 },
4086*4882a593Smuzhiyun 	{
4087*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
4088*4882a593Smuzhiyun 	 .alg.hash = {
4089*4882a593Smuzhiyun 		      .halg.digestsize = SHA3_384_DIGEST_SIZE,
4090*4882a593Smuzhiyun 		      .halg.base = {
4091*4882a593Smuzhiyun 				    .cra_name = "hmac(sha3-384)",
4092*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-sha3-384-iproc",
4093*4882a593Smuzhiyun 				    .cra_blocksize = SHA3_384_BLOCK_SIZE,
4094*4882a593Smuzhiyun 				}
4095*4882a593Smuzhiyun 		      },
4096*4882a593Smuzhiyun 	 .cipher_info = {
4097*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4098*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4099*4882a593Smuzhiyun 			 },
4100*4882a593Smuzhiyun 	 .auth_info = {
4101*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA3_384,
4102*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
4103*4882a593Smuzhiyun 		       },
4104*4882a593Smuzhiyun 	 },
4105*4882a593Smuzhiyun 	{
4106*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
4107*4882a593Smuzhiyun 	 .alg.hash = {
4108*4882a593Smuzhiyun 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4109*4882a593Smuzhiyun 		      .halg.base = {
4110*4882a593Smuzhiyun 				    .cra_name = "sha3-512",
4111*4882a593Smuzhiyun 				    .cra_driver_name = "sha3-512-iproc",
4112*4882a593Smuzhiyun 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4113*4882a593Smuzhiyun 				}
4114*4882a593Smuzhiyun 		      },
4115*4882a593Smuzhiyun 	 .cipher_info = {
4116*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4117*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4118*4882a593Smuzhiyun 			 },
4119*4882a593Smuzhiyun 	 .auth_info = {
4120*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA3_512,
4121*4882a593Smuzhiyun 		       .mode = HASH_MODE_HASH,
4122*4882a593Smuzhiyun 		       },
4123*4882a593Smuzhiyun 	 },
4124*4882a593Smuzhiyun 	{
4125*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
4126*4882a593Smuzhiyun 	 .alg.hash = {
4127*4882a593Smuzhiyun 		      .halg.digestsize = SHA3_512_DIGEST_SIZE,
4128*4882a593Smuzhiyun 		      .halg.base = {
4129*4882a593Smuzhiyun 				    .cra_name = "hmac(sha3-512)",
4130*4882a593Smuzhiyun 				    .cra_driver_name = "hmac-sha3-512-iproc",
4131*4882a593Smuzhiyun 				    .cra_blocksize = SHA3_512_BLOCK_SIZE,
4132*4882a593Smuzhiyun 				}
4133*4882a593Smuzhiyun 		      },
4134*4882a593Smuzhiyun 	 .cipher_info = {
4135*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4136*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4137*4882a593Smuzhiyun 			 },
4138*4882a593Smuzhiyun 	 .auth_info = {
4139*4882a593Smuzhiyun 		       .alg = HASH_ALG_SHA3_512,
4140*4882a593Smuzhiyun 		       .mode = HASH_MODE_HMAC,
4141*4882a593Smuzhiyun 		       },
4142*4882a593Smuzhiyun 	 },
4143*4882a593Smuzhiyun 	{
4144*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
4145*4882a593Smuzhiyun 	 .alg.hash = {
4146*4882a593Smuzhiyun 		      .halg.digestsize = AES_BLOCK_SIZE,
4147*4882a593Smuzhiyun 		      .halg.base = {
4148*4882a593Smuzhiyun 				    .cra_name = "xcbc(aes)",
4149*4882a593Smuzhiyun 				    .cra_driver_name = "xcbc-aes-iproc",
4150*4882a593Smuzhiyun 				    .cra_blocksize = AES_BLOCK_SIZE,
4151*4882a593Smuzhiyun 				}
4152*4882a593Smuzhiyun 		      },
4153*4882a593Smuzhiyun 	 .cipher_info = {
4154*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4155*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4156*4882a593Smuzhiyun 			 },
4157*4882a593Smuzhiyun 	 .auth_info = {
4158*4882a593Smuzhiyun 		       .alg = HASH_ALG_AES,
4159*4882a593Smuzhiyun 		       .mode = HASH_MODE_XCBC,
4160*4882a593Smuzhiyun 		       },
4161*4882a593Smuzhiyun 	 },
4162*4882a593Smuzhiyun 	{
4163*4882a593Smuzhiyun 	 .type = CRYPTO_ALG_TYPE_AHASH,
4164*4882a593Smuzhiyun 	 .alg.hash = {
4165*4882a593Smuzhiyun 		      .halg.digestsize = AES_BLOCK_SIZE,
4166*4882a593Smuzhiyun 		      .halg.base = {
4167*4882a593Smuzhiyun 				    .cra_name = "cmac(aes)",
4168*4882a593Smuzhiyun 				    .cra_driver_name = "cmac-aes-iproc",
4169*4882a593Smuzhiyun 				    .cra_blocksize = AES_BLOCK_SIZE,
4170*4882a593Smuzhiyun 				}
4171*4882a593Smuzhiyun 		      },
4172*4882a593Smuzhiyun 	 .cipher_info = {
4173*4882a593Smuzhiyun 			 .alg = CIPHER_ALG_NONE,
4174*4882a593Smuzhiyun 			 .mode = CIPHER_MODE_NONE,
4175*4882a593Smuzhiyun 			 },
4176*4882a593Smuzhiyun 	 .auth_info = {
4177*4882a593Smuzhiyun 		       .alg = HASH_ALG_AES,
4178*4882a593Smuzhiyun 		       .mode = HASH_MODE_CMAC,
4179*4882a593Smuzhiyun 		       },
4180*4882a593Smuzhiyun 	 },
4181*4882a593Smuzhiyun };
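
/*
 * At probe time each driver_algs[] entry is registered with the kernel
 * crypto API according to its .type.  A minimal sketch of that walk
 * (illustrative only; spu_register_skcipher() below is the real helper
 * for the skcipher case, and the AEAD/AHASH entries go through
 * analogous per-type helpers):
 *
 *	for (i = 0; i < ARRAY_SIZE(driver_algs); i++)
 *		if (driver_algs[i].type == CRYPTO_ALG_TYPE_SKCIPHER)
 *			err = spu_register_skcipher(&driver_algs[i]);
 */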
4182*4882a593Smuzhiyun 
4183*4882a593Smuzhiyun static int generic_cra_init(struct crypto_tfm *tfm,
4184*4882a593Smuzhiyun 			    struct iproc_alg_s *cipher_alg)
4185*4882a593Smuzhiyun {
4186*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
4187*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4188*4882a593Smuzhiyun 	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4189*4882a593Smuzhiyun 
4190*4882a593Smuzhiyun 	flow_log("%s()\n", __func__);
4191*4882a593Smuzhiyun 
4192*4882a593Smuzhiyun 	ctx->alg = cipher_alg;
4193*4882a593Smuzhiyun 	ctx->cipher = cipher_alg->cipher_info;
4194*4882a593Smuzhiyun 	ctx->auth = cipher_alg->auth_info;
4195*4882a593Smuzhiyun 	ctx->auth_first = cipher_alg->auth_first;
4196*4882a593Smuzhiyun 	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4197*4882a593Smuzhiyun 						    ctx->cipher.mode,
4198*4882a593Smuzhiyun 						    blocksize);
4199*4882a593Smuzhiyun 	ctx->fallback_cipher = NULL;
4200*4882a593Smuzhiyun 
4201*4882a593Smuzhiyun 	ctx->enckeylen = 0;
4202*4882a593Smuzhiyun 	ctx->authkeylen = 0;
4203*4882a593Smuzhiyun 
4204*4882a593Smuzhiyun 	atomic_inc(&iproc_priv.stream_count);
4205*4882a593Smuzhiyun 	atomic_inc(&iproc_priv.session_count);
4206*4882a593Smuzhiyun 
4207*4882a593Smuzhiyun 	return 0;
4208*4882a593Smuzhiyun }
4209*4882a593Smuzhiyun 
4210*4882a593Smuzhiyun static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
4211*4882a593Smuzhiyun {
4212*4882a593Smuzhiyun 	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
4213*4882a593Smuzhiyun 	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
4214*4882a593Smuzhiyun 	struct iproc_alg_s *cipher_alg;
4215*4882a593Smuzhiyun 
4216*4882a593Smuzhiyun 	flow_log("%s()\n", __func__);
4217*4882a593Smuzhiyun 
4218*4882a593Smuzhiyun 	crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
4219*4882a593Smuzhiyun 
4220*4882a593Smuzhiyun 	cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
4221*4882a593Smuzhiyun 	return generic_cra_init(tfm, cipher_alg);
4222*4882a593Smuzhiyun }
4223*4882a593Smuzhiyun 
4224*4882a593Smuzhiyun static int ahash_cra_init(struct crypto_tfm *tfm)
4225*4882a593Smuzhiyun {
4226*4882a593Smuzhiyun 	int err;
4227*4882a593Smuzhiyun 	struct crypto_alg *alg = tfm->__crt_alg;
4228*4882a593Smuzhiyun 	struct iproc_alg_s *cipher_alg;
4229*4882a593Smuzhiyun 
4230*4882a593Smuzhiyun 	cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4231*4882a593Smuzhiyun 				  alg.hash);
4232*4882a593Smuzhiyun 
4233*4882a593Smuzhiyun 	err = generic_cra_init(tfm, cipher_alg);
4234*4882a593Smuzhiyun 	flow_log("%s()\n", __func__);
4235*4882a593Smuzhiyun 
4236*4882a593Smuzhiyun 	/*
4237*4882a593Smuzhiyun 	 * export state size has to be < 512 bytes. So don't include msg bufs
4238*4882a593Smuzhiyun 	 * in state size.
4239*4882a593Smuzhiyun 	 */
4240*4882a593Smuzhiyun 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4241*4882a593Smuzhiyun 				 sizeof(struct iproc_reqctx_s));
4242*4882a593Smuzhiyun 
4243*4882a593Smuzhiyun 	return err;
4244*4882a593Smuzhiyun }
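
/*
 * The "< 512 bytes" bound in the comment above is the crypto API's
 * HASH_MAX_STATESIZE limit on exported ahash state.  An illustrative
 * compile-time guard (a sketch, not part of this driver; it assumes
 * the export container struct spu_hash_export_s declared in cipher.h):
 *
 *	BUILD_BUG_ON(sizeof(struct spu_hash_export_s) > HASH_MAX_STATESIZE);
 */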
4245*4882a593Smuzhiyun 
4246*4882a593Smuzhiyun static int aead_cra_init(struct crypto_aead *aead)
4247*4882a593Smuzhiyun {
4248*4882a593Smuzhiyun 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4249*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4250*4882a593Smuzhiyun 	struct crypto_alg *alg = tfm->__crt_alg;
4251*4882a593Smuzhiyun 	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4252*4882a593Smuzhiyun 	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4253*4882a593Smuzhiyun 						      alg.aead);
4254*4882a593Smuzhiyun 
4255*4882a593Smuzhiyun 	int err = generic_cra_init(tfm, cipher_alg);
4256*4882a593Smuzhiyun 
4257*4882a593Smuzhiyun 	flow_log("%s()\n", __func__);
4258*4882a593Smuzhiyun 
4259*4882a593Smuzhiyun 	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4260*4882a593Smuzhiyun 	ctx->is_esp = false;
4261*4882a593Smuzhiyun 	ctx->salt_len = 0;
4262*4882a593Smuzhiyun 	ctx->salt_offset = 0;
4263*4882a593Smuzhiyun 
4264*4882a593Smuzhiyun 	/* random first IV */
4265*4882a593Smuzhiyun 	get_random_bytes(ctx->iv, MAX_IV_SIZE);
4266*4882a593Smuzhiyun 	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
4267*4882a593Smuzhiyun 
4268*4882a593Smuzhiyun 	if (!err) {
4269*4882a593Smuzhiyun 		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4270*4882a593Smuzhiyun 			flow_log("%s() creating fallback cipher\n", __func__);
4271*4882a593Smuzhiyun 
4272*4882a593Smuzhiyun 			ctx->fallback_cipher =
4273*4882a593Smuzhiyun 			    crypto_alloc_aead(alg->cra_name, 0,
4274*4882a593Smuzhiyun 					      CRYPTO_ALG_ASYNC |
4275*4882a593Smuzhiyun 					      CRYPTO_ALG_NEED_FALLBACK);
4276*4882a593Smuzhiyun 			if (IS_ERR(ctx->fallback_cipher)) {
4277*4882a593Smuzhiyun 				pr_err("%s() Error: failed to allocate fallback for %s\n",
4278*4882a593Smuzhiyun 				       __func__, alg->cra_name);
4279*4882a593Smuzhiyun 				return PTR_ERR(ctx->fallback_cipher);
4280*4882a593Smuzhiyun 			}
4281*4882a593Smuzhiyun 		}
4282*4882a593Smuzhiyun 	}
4283*4882a593Smuzhiyun 
4284*4882a593Smuzhiyun 	return err;
4285*4882a593Smuzhiyun }
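
/*
 * Requests the SPU hardware cannot service are re-issued through the
 * fallback allocated above.  A minimal sketch of that handoff, assuming
 * an already-populated struct aead_request *req:
 *
 *	aead_request_set_tfm(req, ctx->fallback_cipher);
 *	err = is_encrypt ? crypto_aead_encrypt(req) :
 *			   crypto_aead_decrypt(req);
 */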
4286*4882a593Smuzhiyun 
4287*4882a593Smuzhiyun static void generic_cra_exit(struct crypto_tfm *tfm)
4288*4882a593Smuzhiyun {
4289*4882a593Smuzhiyun 	atomic_dec(&iproc_priv.session_count);
4290*4882a593Smuzhiyun }
4291*4882a593Smuzhiyun 
4292*4882a593Smuzhiyun static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
4293*4882a593Smuzhiyun {
4294*4882a593Smuzhiyun 	generic_cra_exit(crypto_skcipher_tfm(tfm));
4295*4882a593Smuzhiyun }
4296*4882a593Smuzhiyun 
4297*4882a593Smuzhiyun static void aead_cra_exit(struct crypto_aead *aead)
4298*4882a593Smuzhiyun {
4299*4882a593Smuzhiyun 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4300*4882a593Smuzhiyun 	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4301*4882a593Smuzhiyun 
4302*4882a593Smuzhiyun 	generic_cra_exit(tfm);
4303*4882a593Smuzhiyun 
4304*4882a593Smuzhiyun 	if (ctx->fallback_cipher) {
4305*4882a593Smuzhiyun 		crypto_free_aead(ctx->fallback_cipher);
4306*4882a593Smuzhiyun 		ctx->fallback_cipher = NULL;
4307*4882a593Smuzhiyun 	}
4308*4882a593Smuzhiyun }
4309*4882a593Smuzhiyun 
4310*4882a593Smuzhiyun /**
4311*4882a593Smuzhiyun  * spu_functions_register() - Specify hardware-specific SPU functions based on
4312*4882a593Smuzhiyun  * SPU type read from device tree.
4313*4882a593Smuzhiyun  * @dev:	device structure
4314*4882a593Smuzhiyun  * @spu_type:	SPU hardware generation
4315*4882a593Smuzhiyun  * @spu_subtype: SPU hardware version
4316*4882a593Smuzhiyun  */
4317*4882a593Smuzhiyun static void spu_functions_register(struct device *dev,
4318*4882a593Smuzhiyun 				   enum spu_spu_type spu_type,
4319*4882a593Smuzhiyun 				   enum spu_spu_subtype spu_subtype)
4320*4882a593Smuzhiyun {
4321*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
4322*4882a593Smuzhiyun 
4323*4882a593Smuzhiyun 	if (spu_type == SPU_TYPE_SPUM) {
4324*4882a593Smuzhiyun 		dev_dbg(dev, "Registering SPUM functions");
4325*4882a593Smuzhiyun 		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4326*4882a593Smuzhiyun 		spu->spu_payload_length = spum_payload_length;
4327*4882a593Smuzhiyun 		spu->spu_response_hdr_len = spum_response_hdr_len;
4328*4882a593Smuzhiyun 		spu->spu_hash_pad_len = spum_hash_pad_len;
4329*4882a593Smuzhiyun 		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4330*4882a593Smuzhiyun 		spu->spu_assoc_resp_len = spum_assoc_resp_len;
4331*4882a593Smuzhiyun 		spu->spu_aead_ivlen = spum_aead_ivlen;
4332*4882a593Smuzhiyun 		spu->spu_hash_type = spum_hash_type;
4333*4882a593Smuzhiyun 		spu->spu_digest_size = spum_digest_size;
4334*4882a593Smuzhiyun 		spu->spu_create_request = spum_create_request;
4335*4882a593Smuzhiyun 		spu->spu_cipher_req_init = spum_cipher_req_init;
4336*4882a593Smuzhiyun 		spu->spu_cipher_req_finish = spum_cipher_req_finish;
4337*4882a593Smuzhiyun 		spu->spu_request_pad = spum_request_pad;
4338*4882a593Smuzhiyun 		spu->spu_tx_status_len = spum_tx_status_len;
4339*4882a593Smuzhiyun 		spu->spu_rx_status_len = spum_rx_status_len;
4340*4882a593Smuzhiyun 		spu->spu_status_process = spum_status_process;
4341*4882a593Smuzhiyun 		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4342*4882a593Smuzhiyun 		spu->spu_ccm_update_iv = spum_ccm_update_iv;
4343*4882a593Smuzhiyun 		spu->spu_wordalign_padlen = spum_wordalign_padlen;
4344*4882a593Smuzhiyun 		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4345*4882a593Smuzhiyun 			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4346*4882a593Smuzhiyun 		else
4347*4882a593Smuzhiyun 			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4348*4882a593Smuzhiyun 	} else {
4349*4882a593Smuzhiyun 		dev_dbg(dev, "Registering SPU2 functions");
4350*4882a593Smuzhiyun 		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4351*4882a593Smuzhiyun 		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4352*4882a593Smuzhiyun 		spu->spu_payload_length = spu2_payload_length;
4353*4882a593Smuzhiyun 		spu->spu_response_hdr_len = spu2_response_hdr_len;
4354*4882a593Smuzhiyun 		spu->spu_hash_pad_len = spu2_hash_pad_len;
4355*4882a593Smuzhiyun 		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4356*4882a593Smuzhiyun 		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4357*4882a593Smuzhiyun 		spu->spu_aead_ivlen = spu2_aead_ivlen;
4358*4882a593Smuzhiyun 		spu->spu_hash_type = spu2_hash_type;
4359*4882a593Smuzhiyun 		spu->spu_digest_size = spu2_digest_size;
4360*4882a593Smuzhiyun 		spu->spu_create_request = spu2_create_request;
4361*4882a593Smuzhiyun 		spu->spu_cipher_req_init = spu2_cipher_req_init;
4362*4882a593Smuzhiyun 		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4363*4882a593Smuzhiyun 		spu->spu_request_pad = spu2_request_pad;
4364*4882a593Smuzhiyun 		spu->spu_tx_status_len = spu2_tx_status_len;
4365*4882a593Smuzhiyun 		spu->spu_rx_status_len = spu2_rx_status_len;
4366*4882a593Smuzhiyun 		spu->spu_status_process = spu2_status_process;
4367*4882a593Smuzhiyun 		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4368*4882a593Smuzhiyun 		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4369*4882a593Smuzhiyun 		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4370*4882a593Smuzhiyun 	}
4371*4882a593Smuzhiyun }
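
/*
 * After this runs, the rest of the driver stays hardware-agnostic by
 * calling SPU operations only through these pointers.  For example (a
 * sketch; variable names here are illustrative, with the prototypes in
 * spum.h and spu2.h):
 *
 *	struct spu_hw *spu = &iproc_priv.spu;
 *	u16 hdr_len = spu->spu_response_hdr_len(authkeylen, enckeylen,
 *						false);
 */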
4372*4882a593Smuzhiyun 
4373*4882a593Smuzhiyun /**
4374*4882a593Smuzhiyun  * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
4375*4882a593Smuzhiyun  * channel for the SPU being probed.
4376*4882a593Smuzhiyun  * @dev:  SPU driver device structure
4377*4882a593Smuzhiyun  *
4378*4882a593Smuzhiyun  * Return: 0 if successful
4379*4882a593Smuzhiyun  *	   < 0 otherwise
4380*4882a593Smuzhiyun  */
4381*4882a593Smuzhiyun static int spu_mb_init(struct device *dev)
4382*4882a593Smuzhiyun {
4383*4882a593Smuzhiyun 	struct mbox_client *mcl = &iproc_priv.mcl;
4384*4882a593Smuzhiyun 	int err, i;
4385*4882a593Smuzhiyun 
4386*4882a593Smuzhiyun 	iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4387*4882a593Smuzhiyun 				  sizeof(struct mbox_chan *), GFP_KERNEL);
4388*4882a593Smuzhiyun 	if (!iproc_priv.mbox)
4389*4882a593Smuzhiyun 		return -ENOMEM;
4390*4882a593Smuzhiyun 
4391*4882a593Smuzhiyun 	mcl->dev = dev;
4392*4882a593Smuzhiyun 	mcl->tx_block = false;
4393*4882a593Smuzhiyun 	mcl->tx_tout = 0;
4394*4882a593Smuzhiyun 	mcl->knows_txdone = true;
4395*4882a593Smuzhiyun 	mcl->rx_callback = spu_rx_callback;
4396*4882a593Smuzhiyun 	mcl->tx_done = NULL;
4397*4882a593Smuzhiyun 
4398*4882a593Smuzhiyun 	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4399*4882a593Smuzhiyun 		iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4400*4882a593Smuzhiyun 		if (IS_ERR(iproc_priv.mbox[i])) {
4401*4882a593Smuzhiyun 			err = PTR_ERR(iproc_priv.mbox[i]);
4402*4882a593Smuzhiyun 			dev_err(dev,
4403*4882a593Smuzhiyun 				"Mbox channel %d request failed with err %d\n",
4404*4882a593Smuzhiyun 				i, err);
4405*4882a593Smuzhiyun 			iproc_priv.mbox[i] = NULL;
4406*4882a593Smuzhiyun 			goto free_channels;
4407*4882a593Smuzhiyun 		}
4408*4882a593Smuzhiyun 	}
4409*4882a593Smuzhiyun 
4410*4882a593Smuzhiyun 	return 0;
4411*4882a593Smuzhiyun free_channels:
4412*4882a593Smuzhiyun 	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4413*4882a593Smuzhiyun 		if (iproc_priv.mbox[i])
4414*4882a593Smuzhiyun 			mbox_free_channel(iproc_priv.mbox[i]);
4415*4882a593Smuzhiyun 	}
4416*4882a593Smuzhiyun 
4417*4882a593Smuzhiyun 	return err;
4418*4882a593Smuzhiyun }
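/*
 * Hedged sketch (assumed usage, names invented): with tx_block false and
 * knows_txdone true, the client must signal transmit completion itself, so
 * the submit path is expected to follow the usual mailbox pattern, roughly:
 *
 *	rc = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
 *	if (rc >= 0)
 *		mbox_client_txdone(iproc_priv.mbox[chan_idx], 0);
 *
 * chan_idx and mssg are illustrative names; completed responses come back
 * through the spu_rx_callback() registered above.
 */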
4419*4882a593Smuzhiyun 
4420*4882a593Smuzhiyun static void spu_mb_release(struct platform_device *pdev)
4421*4882a593Smuzhiyun {
4422*4882a593Smuzhiyun 	int i;
4423*4882a593Smuzhiyun 
4424*4882a593Smuzhiyun 	for (i = 0; i < iproc_priv.spu.num_chan; i++)
4425*4882a593Smuzhiyun 		mbox_free_channel(iproc_priv.mbox[i]);
4426*4882a593Smuzhiyun }
4427*4882a593Smuzhiyun 
4428*4882a593Smuzhiyun static void spu_counters_init(void)
4429*4882a593Smuzhiyun {
4430*4882a593Smuzhiyun 	int i;
4431*4882a593Smuzhiyun 	int j;
4432*4882a593Smuzhiyun 
4433*4882a593Smuzhiyun 	atomic_set(&iproc_priv.session_count, 0);
4434*4882a593Smuzhiyun 	atomic_set(&iproc_priv.stream_count, 0);
4435*4882a593Smuzhiyun 	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4436*4882a593Smuzhiyun 	atomic64_set(&iproc_priv.bytes_in, 0);
4437*4882a593Smuzhiyun 	atomic64_set(&iproc_priv.bytes_out, 0);
4438*4882a593Smuzhiyun 	for (i = 0; i < SPU_OP_NUM; i++) {
4439*4882a593Smuzhiyun 		atomic_set(&iproc_priv.op_counts[i], 0);
4440*4882a593Smuzhiyun 		atomic_set(&iproc_priv.setkey_cnt[i], 0);
4441*4882a593Smuzhiyun 	}
4442*4882a593Smuzhiyun 	for (i = 0; i < CIPHER_ALG_LAST; i++)
4443*4882a593Smuzhiyun 		for (j = 0; j < CIPHER_MODE_LAST; j++)
4444*4882a593Smuzhiyun 			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4445*4882a593Smuzhiyun 
4446*4882a593Smuzhiyun 	for (i = 0; i < HASH_ALG_LAST; i++) {
4447*4882a593Smuzhiyun 		atomic_set(&iproc_priv.hash_cnt[i], 0);
4448*4882a593Smuzhiyun 		atomic_set(&iproc_priv.hmac_cnt[i], 0);
4449*4882a593Smuzhiyun 	}
4450*4882a593Smuzhiyun 	for (i = 0; i < AEAD_TYPE_LAST; i++)
4451*4882a593Smuzhiyun 		atomic_set(&iproc_priv.aead_cnt[i], 0);
4452*4882a593Smuzhiyun 
4453*4882a593Smuzhiyun 	atomic_set(&iproc_priv.mb_no_spc, 0);
4454*4882a593Smuzhiyun 	atomic_set(&iproc_priv.mb_send_fail, 0);
4455*4882a593Smuzhiyun 	atomic_set(&iproc_priv.bad_icv, 0);
4456*4882a593Smuzhiyun }
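/*
 * Note (assumption): these atomics appear to be lockless statistics that the
 * data path bumps as requests flow, e.g. atomic_inc(&iproc_priv.op_counts[op])
 * or atomic64_add(len, &iproc_priv.bytes_in), and that spu_setup_debugfs() is
 * expected to expose read-only via debugfs.
 */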
4457*4882a593Smuzhiyun 
4458*4882a593Smuzhiyun static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
4459*4882a593Smuzhiyun {
4460*4882a593Smuzhiyun 	struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
4461*4882a593Smuzhiyun 	int err;
4462*4882a593Smuzhiyun 
4463*4882a593Smuzhiyun 	crypto->base.cra_module = THIS_MODULE;
4464*4882a593Smuzhiyun 	crypto->base.cra_priority = cipher_pri;
4465*4882a593Smuzhiyun 	crypto->base.cra_alignmask = 0;
4466*4882a593Smuzhiyun 	crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4467*4882a593Smuzhiyun 	crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
4468*4882a593Smuzhiyun 				 CRYPTO_ALG_ALLOCATES_MEMORY |
4469*4882a593Smuzhiyun 				 CRYPTO_ALG_KERN_DRIVER_ONLY;
4470*4882a593Smuzhiyun 
4471*4882a593Smuzhiyun 	crypto->init = skcipher_init_tfm;
4472*4882a593Smuzhiyun 	crypto->exit = skcipher_exit_tfm;
4473*4882a593Smuzhiyun 	crypto->setkey = skcipher_setkey;
4474*4882a593Smuzhiyun 	crypto->encrypt = skcipher_encrypt;
4475*4882a593Smuzhiyun 	crypto->decrypt = skcipher_decrypt;
4476*4882a593Smuzhiyun 
4477*4882a593Smuzhiyun 	err = crypto_register_skcipher(crypto);
4478*4882a593Smuzhiyun 	/* Mark alg as having been registered, if successful */
4479*4882a593Smuzhiyun 	if (err == 0)
4480*4882a593Smuzhiyun 		driver_alg->registered = true;
4481*4882a593Smuzhiyun 	pr_debug("  registered skcipher %s\n", crypto->base.cra_driver_name);
4482*4882a593Smuzhiyun 	return err;
4483*4882a593Smuzhiyun }
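/*
 * Hedged sketch (assumed consumer, names and algorithm invented): registered
 * skciphers are consumed through the standard crypto API rather than called
 * directly. A consumer in another module might do:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * Whether this driver services the request depends on cipher_pri relative to
 * competing implementations; CRYPTO_ALG_KERN_DRIVER_ONLY advertises the
 * algorithm as available only through a kernel driver.
 */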
4484*4882a593Smuzhiyun 
4485*4882a593Smuzhiyun static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4486*4882a593Smuzhiyun {
4487*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
4488*4882a593Smuzhiyun 	struct ahash_alg *hash = &driver_alg->alg.hash;
4489*4882a593Smuzhiyun 	int err;
4490*4882a593Smuzhiyun 
4491*4882a593Smuzhiyun 	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
4492*4882a593Smuzhiyun 	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4493*4882a593Smuzhiyun 	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4494*4882a593Smuzhiyun 	    (spu->spu_type == SPU_TYPE_SPUM))
4495*4882a593Smuzhiyun 		return 0;
4496*4882a593Smuzhiyun 
4497*4882a593Smuzhiyun 	/* SHA3 algorithm variants are only supported on SPU2 version 2 */
4498*4882a593Smuzhiyun 	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4499*4882a593Smuzhiyun 	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4500*4882a593Smuzhiyun 		return 0;
4501*4882a593Smuzhiyun 
4502*4882a593Smuzhiyun 	hash->halg.base.cra_module = THIS_MODULE;
4503*4882a593Smuzhiyun 	hash->halg.base.cra_priority = hash_pri;
4504*4882a593Smuzhiyun 	hash->halg.base.cra_alignmask = 0;
4505*4882a593Smuzhiyun 	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4506*4882a593Smuzhiyun 	hash->halg.base.cra_init = ahash_cra_init;
4507*4882a593Smuzhiyun 	hash->halg.base.cra_exit = generic_cra_exit;
4508*4882a593Smuzhiyun 	hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
4509*4882a593Smuzhiyun 				    CRYPTO_ALG_ALLOCATES_MEMORY;
4510*4882a593Smuzhiyun 	hash->halg.statesize = sizeof(struct spu_hash_export_s);
4511*4882a593Smuzhiyun 
4512*4882a593Smuzhiyun 	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4513*4882a593Smuzhiyun 		hash->init = ahash_init;
4514*4882a593Smuzhiyun 		hash->update = ahash_update;
4515*4882a593Smuzhiyun 		hash->final = ahash_final;
4516*4882a593Smuzhiyun 		hash->finup = ahash_finup;
4517*4882a593Smuzhiyun 		hash->digest = ahash_digest;
4518*4882a593Smuzhiyun 		if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4519*4882a593Smuzhiyun 		    ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
4520*4882a593Smuzhiyun 		    (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
4521*4882a593Smuzhiyun 			hash->setkey = ahash_setkey;
4522*4882a593Smuzhiyun 		}
4523*4882a593Smuzhiyun 	} else {
4524*4882a593Smuzhiyun 		hash->setkey = ahash_hmac_setkey;
4525*4882a593Smuzhiyun 		hash->init = ahash_hmac_init;
4526*4882a593Smuzhiyun 		hash->update = ahash_hmac_update;
4527*4882a593Smuzhiyun 		hash->final = ahash_hmac_final;
4528*4882a593Smuzhiyun 		hash->finup = ahash_hmac_finup;
4529*4882a593Smuzhiyun 		hash->digest = ahash_hmac_digest;
4530*4882a593Smuzhiyun 	}
4531*4882a593Smuzhiyun 	hash->export = ahash_export;
4532*4882a593Smuzhiyun 	hash->import = ahash_import;
4533*4882a593Smuzhiyun 
4534*4882a593Smuzhiyun 	err = crypto_register_ahash(hash);
4535*4882a593Smuzhiyun 	/* Mark alg as having been registered, if successful */
4536*4882a593Smuzhiyun 	if (err == 0)
4537*4882a593Smuzhiyun 		driver_alg->registered = true;
4538*4882a593Smuzhiyun 	pr_debug("  registered ahash %s\n",
4539*4882a593Smuzhiyun 		 hash->halg.base.cra_driver_name);
4540*4882a593Smuzhiyun 	return err;
4541*4882a593Smuzhiyun }
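/*
 * Hedged sketch (names illustrative): statesize is sized for the
 * export()/import() pair, which lets the crypto core checkpoint and resume a
 * partial hash. A minimal consumer-side pattern:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	u8 *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *	...
 *	crypto_ahash_export(req, state);
 *	crypto_ahash_import(req, state);
 */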
4542*4882a593Smuzhiyun 
4543*4882a593Smuzhiyun static int spu_register_aead(struct iproc_alg_s *driver_alg)
4544*4882a593Smuzhiyun {
4545*4882a593Smuzhiyun 	struct aead_alg *aead = &driver_alg->alg.aead;
4546*4882a593Smuzhiyun 	int err;
4547*4882a593Smuzhiyun 
4548*4882a593Smuzhiyun 	aead->base.cra_module = THIS_MODULE;
4549*4882a593Smuzhiyun 	aead->base.cra_priority = aead_pri;
4550*4882a593Smuzhiyun 	aead->base.cra_alignmask = 0;
4551*4882a593Smuzhiyun 	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4552*4882a593Smuzhiyun 
4553*4882a593Smuzhiyun 	aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4554*4882a593Smuzhiyun 	/* setkey set in alg initialization */
4555*4882a593Smuzhiyun 	aead->setauthsize = aead_setauthsize;
4556*4882a593Smuzhiyun 	aead->encrypt = aead_encrypt;
4557*4882a593Smuzhiyun 	aead->decrypt = aead_decrypt;
4558*4882a593Smuzhiyun 	aead->init = aead_cra_init;
4559*4882a593Smuzhiyun 	aead->exit = aead_cra_exit;
4560*4882a593Smuzhiyun 
4561*4882a593Smuzhiyun 	err = crypto_register_aead(aead);
4562*4882a593Smuzhiyun 	/* Mark alg as having been registered, if successful */
4563*4882a593Smuzhiyun 	if (err == 0)
4564*4882a593Smuzhiyun 		driver_alg->registered = true;
4565*4882a593Smuzhiyun 	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
4566*4882a593Smuzhiyun 	return err;
4567*4882a593Smuzhiyun }
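/*
 * Hedged sketch (assumed consumer): setkey comes from the per-algorithm entry
 * in the driver_algs table, per the comment above; a consumer then uses the
 * standard AEAD calls, e.g.:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	err = crypto_aead_setkey(tfm, key, keylen);
 *	err = crypto_aead_setauthsize(tfm, 16);
 *
 * "gcm(aes)" and the 16-byte tag are illustrative values; setauthsize is
 * routed to the aead_setauthsize() hook installed above.
 */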
4568*4882a593Smuzhiyun 
4569*4882a593Smuzhiyun /* register crypto algorithms the device supports */
4570*4882a593Smuzhiyun static int spu_algs_register(struct device *dev)
4571*4882a593Smuzhiyun {
4572*4882a593Smuzhiyun 	int i, j;
4573*4882a593Smuzhiyun 	int err;
4574*4882a593Smuzhiyun 
4575*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4576*4882a593Smuzhiyun 		switch (driver_algs[i].type) {
4577*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_SKCIPHER:
4578*4882a593Smuzhiyun 			err = spu_register_skcipher(&driver_algs[i]);
4579*4882a593Smuzhiyun 			break;
4580*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_AHASH:
4581*4882a593Smuzhiyun 			err = spu_register_ahash(&driver_algs[i]);
4582*4882a593Smuzhiyun 			break;
4583*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_AEAD:
4584*4882a593Smuzhiyun 			err = spu_register_aead(&driver_algs[i]);
4585*4882a593Smuzhiyun 			break;
4586*4882a593Smuzhiyun 		default:
4587*4882a593Smuzhiyun 			dev_err(dev,
4588*4882a593Smuzhiyun 				"iproc-crypto: unknown alg type: %d\n",
4589*4882a593Smuzhiyun 				driver_algs[i].type);
4590*4882a593Smuzhiyun 			err = -EINVAL;
4591*4882a593Smuzhiyun 		}
4592*4882a593Smuzhiyun 
4593*4882a593Smuzhiyun 		if (err) {
4594*4882a593Smuzhiyun 			dev_err(dev, "alg registration failed with error %d\n",
4595*4882a593Smuzhiyun 				err);
4596*4882a593Smuzhiyun 			goto err_algs;
4597*4882a593Smuzhiyun 		}
4598*4882a593Smuzhiyun 	}
4599*4882a593Smuzhiyun 
4600*4882a593Smuzhiyun 	return 0;
4601*4882a593Smuzhiyun 
4602*4882a593Smuzhiyun err_algs:
4603*4882a593Smuzhiyun 	for (j = 0; j < i; j++) {
4604*4882a593Smuzhiyun 		/* Skip any algorithm not registered */
4605*4882a593Smuzhiyun 		if (!driver_algs[j].registered)
4606*4882a593Smuzhiyun 			continue;
4607*4882a593Smuzhiyun 		switch (driver_algs[j].type) {
4608*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_SKCIPHER:
4609*4882a593Smuzhiyun 			crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
4610*4882a593Smuzhiyun 			driver_algs[j].registered = false;
4611*4882a593Smuzhiyun 			break;
4612*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_AHASH:
4613*4882a593Smuzhiyun 			crypto_unregister_ahash(&driver_algs[j].alg.hash);
4614*4882a593Smuzhiyun 			driver_algs[j].registered = false;
4615*4882a593Smuzhiyun 			break;
4616*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_AEAD:
4617*4882a593Smuzhiyun 			crypto_unregister_aead(&driver_algs[j].alg.aead);
4618*4882a593Smuzhiyun 			driver_algs[j].registered = false;
4619*4882a593Smuzhiyun 			break;
4620*4882a593Smuzhiyun 		}
4621*4882a593Smuzhiyun 	}
4622*4882a593Smuzhiyun 	return err;
4623*4882a593Smuzhiyun }
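/*
 * Note: err_algs above is the usual partial-unwind pattern - only entries
 * 0..i-1 can have been registered, and the ->registered flag also guards
 * algorithms intentionally skipped for this hardware (see the early
 * "return 0" cases in spu_register_ahash()).
 */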
4624*4882a593Smuzhiyun 
4625*4882a593Smuzhiyun /* ==================== Kernel Platform API ==================== */
4626*4882a593Smuzhiyun 
4627*4882a593Smuzhiyun static struct spu_type_subtype spum_ns2_types = {
4628*4882a593Smuzhiyun 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
4629*4882a593Smuzhiyun };
4630*4882a593Smuzhiyun 
4631*4882a593Smuzhiyun static struct spu_type_subtype spum_nsp_types = {
4632*4882a593Smuzhiyun 	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
4633*4882a593Smuzhiyun };
4634*4882a593Smuzhiyun 
4635*4882a593Smuzhiyun static struct spu_type_subtype spu2_types = {
4636*4882a593Smuzhiyun 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
4637*4882a593Smuzhiyun };
4638*4882a593Smuzhiyun 
4639*4882a593Smuzhiyun static struct spu_type_subtype spu2_v2_types = {
4640*4882a593Smuzhiyun 	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
4641*4882a593Smuzhiyun };
4642*4882a593Smuzhiyun 
4643*4882a593Smuzhiyun static const struct of_device_id bcm_spu_dt_ids[] = {
4644*4882a593Smuzhiyun 	{
4645*4882a593Smuzhiyun 		.compatible = "brcm,spum-crypto",
4646*4882a593Smuzhiyun 		.data = &spum_ns2_types,
4647*4882a593Smuzhiyun 	},
4648*4882a593Smuzhiyun 	{
4649*4882a593Smuzhiyun 		.compatible = "brcm,spum-nsp-crypto",
4650*4882a593Smuzhiyun 		.data = &spum_nsp_types,
4651*4882a593Smuzhiyun 	},
4652*4882a593Smuzhiyun 	{
4653*4882a593Smuzhiyun 		.compatible = "brcm,spu2-crypto",
4654*4882a593Smuzhiyun 		.data = &spu2_types,
4655*4882a593Smuzhiyun 	},
4656*4882a593Smuzhiyun 	{
4657*4882a593Smuzhiyun 		.compatible = "brcm,spu2-v2-crypto",
4658*4882a593Smuzhiyun 		.data = &spu2_v2_types,
4659*4882a593Smuzhiyun 	},
4660*4882a593Smuzhiyun 	{ /* sentinel */ }
4661*4882a593Smuzhiyun };
4662*4882a593Smuzhiyun 
4663*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
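/*
 * Hedged sketch (illustrative only): a matching device-tree node pairs one of
 * the compatibles above with the register window(s) and mailbox channels that
 * spu_dt_read() parses, along the lines of:
 *
 *	crypto@612d0000 {
 *		compatible = "brcm,spu2-v2-crypto";
 *		reg = <0x612d0000 0x1000>;
 *		mboxes = <&pdc0 0>, <&pdc1 0>;
 *	};
 *
 * The address and the pdc mailbox phandles are invented for illustration;
 * consult the platform's actual bindings.
 */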
4664*4882a593Smuzhiyun 
4665*4882a593Smuzhiyun static int spu_dt_read(struct platform_device *pdev)
4666*4882a593Smuzhiyun {
4667*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
4668*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
4669*4882a593Smuzhiyun 	struct resource *spu_ctrl_regs;
4670*4882a593Smuzhiyun 	const struct spu_type_subtype *matched_spu_type;
4671*4882a593Smuzhiyun 	struct device_node *dn = pdev->dev.of_node;
4672*4882a593Smuzhiyun 	int err, i;
4673*4882a593Smuzhiyun 
4674*4882a593Smuzhiyun 	/* Count number of mailbox channels */
4675*4882a593Smuzhiyun 	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
4676*4882a593Smuzhiyun 
4677*4882a593Smuzhiyun 	matched_spu_type = of_device_get_match_data(dev);
4678*4882a593Smuzhiyun 	if (!matched_spu_type) {
4679*4882a593Smuzhiyun 		dev_err(dev, "Failed to match device\n");
4680*4882a593Smuzhiyun 		return -ENODEV;
4681*4882a593Smuzhiyun 	}
4682*4882a593Smuzhiyun 
4683*4882a593Smuzhiyun 	spu->spu_type = matched_spu_type->type;
4684*4882a593Smuzhiyun 	spu->spu_subtype = matched_spu_type->subtype;
4685*4882a593Smuzhiyun 
4686*4882a593Smuzhiyun 	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
4687*4882a593Smuzhiyun 		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
4688*4882a593Smuzhiyun 
4689*4882a593Smuzhiyun 		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
4690*4882a593Smuzhiyun 		if (IS_ERR(spu->reg_vbase[i])) {
4691*4882a593Smuzhiyun 			err = PTR_ERR(spu->reg_vbase[i]);
4692*4882a593Smuzhiyun 			dev_err(dev, "Failed to map registers: %d\n",
4693*4882a593Smuzhiyun 				err);
4694*4882a593Smuzhiyun 			spu->reg_vbase[i] = NULL;
4695*4882a593Smuzhiyun 			return err;
4696*4882a593Smuzhiyun 		}
4697*4882a593Smuzhiyun 	}
4698*4882a593Smuzhiyun 	spu->num_spu = i;
4699*4882a593Smuzhiyun 	dev_dbg(dev, "Device has %d SPUs\n", spu->num_spu);
4700*4882a593Smuzhiyun 
4701*4882a593Smuzhiyun 	return 0;
4702*4882a593Smuzhiyun }
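/*
 * Note (assumption): of_count_phandle_with_args() returns a negative errno
 * when the "mboxes" property is absent; num_chan would then be nonsensical
 * and spu_mb_init()'s devm_kcalloc() should fail, so probe still errors out,
 * though an explicit check here would fail faster and more clearly.
 */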
4703*4882a593Smuzhiyun 
4704*4882a593Smuzhiyun static int bcm_spu_probe(struct platform_device *pdev)
4705*4882a593Smuzhiyun {
4706*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
4707*4882a593Smuzhiyun 	struct spu_hw *spu = &iproc_priv.spu;
4708*4882a593Smuzhiyun 	int err;
4709*4882a593Smuzhiyun 
4710*4882a593Smuzhiyun 	iproc_priv.pdev  = pdev;
4711*4882a593Smuzhiyun 	platform_set_drvdata(iproc_priv.pdev,
4712*4882a593Smuzhiyun 			     &iproc_priv);
4713*4882a593Smuzhiyun 
4714*4882a593Smuzhiyun 	err = spu_dt_read(pdev);
4715*4882a593Smuzhiyun 	if (err < 0)
4716*4882a593Smuzhiyun 		goto failure;
4717*4882a593Smuzhiyun 
4718*4882a593Smuzhiyun 	err = spu_mb_init(dev);
4719*4882a593Smuzhiyun 	if (err < 0)
4720*4882a593Smuzhiyun 		goto failure;
4721*4882a593Smuzhiyun 
4722*4882a593Smuzhiyun 	if (spu->spu_type == SPU_TYPE_SPUM)
4723*4882a593Smuzhiyun 		iproc_priv.bcm_hdr_len = 8;
4724*4882a593Smuzhiyun 	else if (spu->spu_type == SPU_TYPE_SPU2)
4725*4882a593Smuzhiyun 		iproc_priv.bcm_hdr_len = 0;
4726*4882a593Smuzhiyun 
4727*4882a593Smuzhiyun 	spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
4728*4882a593Smuzhiyun 
4729*4882a593Smuzhiyun 	spu_counters_init();
4730*4882a593Smuzhiyun 
4731*4882a593Smuzhiyun 	spu_setup_debugfs();
4732*4882a593Smuzhiyun 
4733*4882a593Smuzhiyun 	err = spu_algs_register(dev);
4734*4882a593Smuzhiyun 	if (err < 0)
4735*4882a593Smuzhiyun 		goto fail_reg;
4736*4882a593Smuzhiyun 
4737*4882a593Smuzhiyun 	return 0;
4738*4882a593Smuzhiyun 
4739*4882a593Smuzhiyun fail_reg:
4740*4882a593Smuzhiyun 	spu_free_debugfs();
	/*
	 * spu_mb_init() frees any channels it managed to request before it
	 * fails, so only release mailbox channels on the path where
	 * spu_mb_init() has already succeeded.
	 */
4742*4882a593Smuzhiyun 	spu_mb_release(pdev);
4741*4882a593Smuzhiyun failure:
4743*4882a593Smuzhiyun 	dev_err(dev, "%s failed with error %d.\n", __func__, err);
4744*4882a593Smuzhiyun 
4745*4882a593Smuzhiyun 	return err;
4746*4882a593Smuzhiyun }
4747*4882a593Smuzhiyun 
4748*4882a593Smuzhiyun static int bcm_spu_remove(struct platform_device *pdev)
4749*4882a593Smuzhiyun {
4750*4882a593Smuzhiyun 	int i;
4751*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
4752*4882a593Smuzhiyun 	char *cdn;
4753*4882a593Smuzhiyun 
4754*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4755*4882a593Smuzhiyun 		/*
4756*4882a593Smuzhiyun 		 * Not all algorithms were registered, depending on whether
4757*4882a593Smuzhiyun 		 * hardware is SPU or SPU2.  So here we make sure to skip
4758*4882a593Smuzhiyun 		 * those algorithms that were not previously registered.
4759*4882a593Smuzhiyun 		 */
4760*4882a593Smuzhiyun 		if (!driver_algs[i].registered)
4761*4882a593Smuzhiyun 			continue;
4762*4882a593Smuzhiyun 
4763*4882a593Smuzhiyun 		switch (driver_algs[i].type) {
4764*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_SKCIPHER:
4765*4882a593Smuzhiyun 			crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
4766*4882a593Smuzhiyun 			dev_dbg(dev, "  unregistered cipher %s\n",
4767*4882a593Smuzhiyun 				driver_algs[i].alg.skcipher.base.cra_driver_name);
4768*4882a593Smuzhiyun 			driver_algs[i].registered = false;
4769*4882a593Smuzhiyun 			break;
4770*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_AHASH:
4771*4882a593Smuzhiyun 			crypto_unregister_ahash(&driver_algs[i].alg.hash);
4772*4882a593Smuzhiyun 			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4773*4882a593Smuzhiyun 			dev_dbg(dev, "  unregistered hash %s\n", cdn);
4774*4882a593Smuzhiyun 			driver_algs[i].registered = false;
4775*4882a593Smuzhiyun 			break;
4776*4882a593Smuzhiyun 		case CRYPTO_ALG_TYPE_AEAD:
4777*4882a593Smuzhiyun 			crypto_unregister_aead(&driver_algs[i].alg.aead);
4778*4882a593Smuzhiyun 			dev_dbg(dev, "  unregistered aead %s\n",
4779*4882a593Smuzhiyun 				driver_algs[i].alg.aead.base.cra_driver_name);
4780*4882a593Smuzhiyun 			driver_algs[i].registered = false;
4781*4882a593Smuzhiyun 			break;
4782*4882a593Smuzhiyun 		}
4783*4882a593Smuzhiyun 	}
4784*4882a593Smuzhiyun 	spu_free_debugfs();
4785*4882a593Smuzhiyun 	spu_mb_release(pdev);
4786*4882a593Smuzhiyun 	return 0;
4787*4882a593Smuzhiyun }
4788*4882a593Smuzhiyun 
4789*4882a593Smuzhiyun /* ===== Kernel Module API ===== */
4790*4882a593Smuzhiyun 
4791*4882a593Smuzhiyun static struct platform_driver bcm_spu_pdriver = {
4792*4882a593Smuzhiyun 	.driver = {
4793*4882a593Smuzhiyun 		   .name = "brcm-spu-crypto",
4794*4882a593Smuzhiyun 		   .of_match_table = of_match_ptr(bcm_spu_dt_ids),
4795*4882a593Smuzhiyun 		   },
4796*4882a593Smuzhiyun 	.probe = bcm_spu_probe,
4797*4882a593Smuzhiyun 	.remove = bcm_spu_remove,
4798*4882a593Smuzhiyun };
4799*4882a593Smuzhiyun module_platform_driver(bcm_spu_pdriver);
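/*
 * Note: module_platform_driver() generates the boilerplate
 * module_init()/module_exit() pair, roughly equivalent to:
 *
 *	static int __init bcm_spu_pdriver_init(void)
 *	{
 *		return platform_driver_register(&bcm_spu_pdriver);
 *	}
 *	module_init(bcm_spu_pdriver_init);
 *
 * with a matching platform_driver_unregister() call on module exit.
 */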
4800*4882a593Smuzhiyun 
4801*4882a593Smuzhiyun MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
4802*4882a593Smuzhiyun MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
4803*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
4804