// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must
 * carry all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
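
/*
 * Each job descriptor above is sized as 2 * CAAM_CMD_SZ of command
 * words plus the protocol data block (PDB) of the selected RSA
 * operation, since PKC jobs carry the PDB inline in the descriptor.
 */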

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - count the leading zero bytes to be
 * stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zeros, in bytes, to strip
 * @flags : operation flags
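 *
 * Example: if the sgl data begins { 0x00, 0x00, 0x5a, ... } and
 * nbytes == 2, both zero bytes are counted and 2 is returned.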
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

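/*
 * Allocate the extended descriptor in a single kzalloc: the software
 * bookkeeping (struct rsa_edesc), the hardware job descriptor and, if
 * needed, the sec4 scatter/gather table all share one buffer.  Inputs
 * shorter than the modulus get a leading entry for ctx->padding_dma in
 * the input s/g table, so the hardware sees a zero-padded operand.
 */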
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is shorter than the modulus n,
		 * so the input must be zero-padded
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

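/*
 * crypto-engine worker: runs only for requests that took the backlog
 * path.  bklog is set so the job completion callback finalizes the
 * request through crypto_finalize_akcipher_request() instead of
 * completing it directly.
 */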
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

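/*
 * The three RSA private-key PDB layouts mirror the key material held in
 * struct caam_rsa_key: form 1 uses (n, d), form 2 adds the primes p and
 * q, and form 3 is the full CRT set (p, q, dP, dQ, qInv).  Each setter
 * below DMA-maps the members it needs, fills the matching PDB and
 * unwinds all mappings on error.
 */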
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to the crypto engine, since the
	 * others can be handled directly by CAAM, if free, especially as
	 * the JR has up to 1024 entries (more than crypto-engine's 10).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to less than the corresponding p, q length,
 * as BER encoding requires that the minimum number of bytes be used to
 * encode the integer. The decoded dP, dQ, qInv values have to be
 * zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining
 * data to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and
 * returns the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

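/*
 * 4096 bits is 512 bytes, i.e. CAAM_RSA_MAX_INPUT_SIZE: the largest
 * modulus the zero-padding buffer and the descriptors are sized for.
 */
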
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

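/*
 * Try to upgrade the private key to a CRT form.  Failing to copy any
 * member is not fatal: the key simply stays in the simpler form (the
 * default FORM1 when p or q cannot be copied, FORM2 when only the CRT
 * members are missing) and decryption falls back accordingly.
 */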
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session pkc driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per-session pkc driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

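/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * consumer reaches this implementation through the generic akcipher
 * API, with "rsa-caam" selected when its cra_priority wins:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
 *	akcipher_request_set_crypt(req, src_sgl, dst_sgl, src_len, dst_len);
 *	crypto_akcipher_encrypt(req);
 *
 * der_key and the scatterlists are caller-provided placeholders; error
 * handling is omitted.
 */
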
static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If
		 * this is the case, the number is non-zero, but this bit is
		 * set to indicate that no encryption or decryption is
		 * supported. Only signing and verifying are supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}