// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/kfifo.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

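/*
 * Number of random bytes produced by one RNG FIFO STORE job; the kfifo
 * backing non-blocking reads is sized to the same value.
 */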
#define CAAM_RNG_MAX_FIFO_STORE_SIZE	16

/*
 * Length of used descriptors, see caam_init_desc()
 */
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ +				\
			   CAAM_CMD_SZ +				\
			   CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

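/*
 * Two preconstructed job descriptors are kept per device: desc_sync
 * backs the blocking (wait == true) path in caam_read(), while
 * desc_async backs the kfifo refill job run from the workqueue, so the
 * two paths never race over a shared descriptor buffer.
 */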
/* rng per-device context */
struct caam_rng_ctx {
	struct hwrng rng;
	struct device *jrdev;
	struct device *ctrldev;
	void *desc_async;
	void *desc_sync;
	struct work_struct worker;
	struct kfifo fifo;
};

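/* per-job context handed to the job ring completion callback */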
struct caam_rng_job_ctx {
	struct completion *done;
	int *err;
};

static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
	return (struct caam_rng_ctx *)r->priv;
}

static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct caam_rng_job_ctx *jctx = context;

	if (err)
		*jctx->err = caam_jr_strstatus(jrdev, err);

	complete(jctx->done);
}

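/*
 * Construct the three-command job descriptor (header, RNG operation,
 * FIFO STORE); CAAM_RNG_DESC_LEN above must match this layout.
 */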
static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Generate random bytes: + 1 cmd_sz */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
			 OP_ALG_PR_ON);
	/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
	append_fifo_store(desc, dst_dma,
			  CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}

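/*
 * Run one RNG job synchronously: map @dst for DMA, enqueue the
 * descriptor and sleep until the completion callback fires. The @len
 * argument is ignored; each job always produces
 * CAAM_RNG_MAX_FIFO_STORE_SIZE bytes. Returns that length on success
 * or a negative errno.
 */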
static int caam_rng_read_one(struct device *jrdev,
			     void *dst, int len,
			     void *desc,
			     struct completion *done)
{
	dma_addr_t dst_dma;
	int err, ret = 0;
	struct caam_rng_job_ctx jctx = {
		.done = done,
		.err  = &ret,
	};

	len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

	dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map destination memory\n");
		return -ENOMEM;
	}

	init_completion(done);
	err = caam_jr_enqueue(jrdev,
			      caam_init_desc(desc, dst_dma),
			      caam_rng_done, &jctx);
	if (err == -EINPROGRESS) {
		wait_for_completion(done);
		err = 0;
	}

	dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);

	return err ?: (ret ?: len);
}

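/*
 * Refill the kfifo with one job's worth of random bytes, letting the
 * device DMA straight into the fifo buffer via kfifo_dma_in_prepare()
 * and kfifo_dma_in_finish().
 */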
static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
	struct scatterlist sg[1];
	struct completion done;
	int len, nents;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
				     CAAM_RNG_MAX_FIFO_STORE_SIZE);
	if (!nents)
		return;

	len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
				sg[0].length,
				ctx->desc_async,
				&done);
	if (len < 0)
		return;

	kfifo_dma_in_finish(&ctx->fifo, len);
}

static void caam_rng_worker(struct work_struct *work)
{
	struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
						worker);
	caam_rng_fill_async(ctx);
}

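/*
 * hwrng .read callback: when the caller may sleep, run a synchronous
 * job directly into @dst; otherwise hand out whatever the kfifo holds
 * and schedule a background refill once it runs dry.
 */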
static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int out;

	if (wait) {
		struct completion done;

		return caam_rng_read_one(ctx->jrdev, dst, max,
					 ctx->desc_sync, &done);
	}

	out = kfifo_out(&ctx->fifo, dst, max);
	if (kfifo_is_empty(&ctx->fifo))
		schedule_work(&ctx->worker);

	return out;
}

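/* flush any pending refill work, then release the job ring and kfifo */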
static void caam_cleanup(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

	flush_work(&ctx->worker);
	caam_jr_free(ctx->jrdev);
	kfifo_free(&ctx->fifo);
}

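/*
 * hwrng .init callback: the descriptors and the kfifo buffer are
 * allocated GFP_DMA so they sit in DMA-capable memory for the CAAM
 * engine, and an initial refill is kicked off so early non-blocking
 * readers find data already buffered.
 */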
static int caam_init(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int err;

	ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				      GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_sync)
		return -ENOMEM;

	ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				       GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_async)
		return -ENOMEM;

	if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
			GFP_DMA | GFP_KERNEL))
		return -ENOMEM;

	INIT_WORK(&ctx->worker, caam_rng_worker);

	ctx->jrdev = caam_jr_alloc();
	err = PTR_ERR_OR_ZERO(ctx->jrdev);
	if (err) {
		kfifo_free(&ctx->fifo);
		pr_err("Job Ring Device allocation for transform failed\n");
		return err;
	}

	/*
	 * Fill async buffer to have early randomness data for
	 * hw_random
	 */
	caam_rng_fill_async(ctx);

	return 0;
}

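/*
 * Forward declaration: the address of caam_rng_init() doubles as the
 * devres group identifier released in caam_rng_exit() below.
 */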
int caam_rng_init(struct device *ctrldev);

void caam_rng_exit(struct device *ctrldev)
{
	devres_release_group(ctrldev, caam_rng_init);
}

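/*
 * Probe-time entry point: bail out quietly when no RNG block is
 * instantiated, otherwise allocate the context inside a devres group
 * and register with the hwrng core.
 */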
int caam_rng_init(struct device *ctrldev)
{
	struct caam_rng_ctx *ctx;
	u32 rng_inst;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int ret;

	/* Check for an instantiated RNG before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst)
		return 0;

	if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
		return -ENOMEM;

	ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ctrldev = ctrldev;

	ctx->rng.name    = "rng-caam";
	ctx->rng.init    = caam_init;
	ctx->rng.cleanup = caam_cleanup;
	ctx->rng.read    = caam_read;
	ctx->rng.priv    = (unsigned long)ctx;
	ctx->rng.quality = 1024;

	dev_info(ctrldev, "registering rng-caam\n");

	ret = devm_hwrng_register(ctrldev, &ctx->rng);
	if (ret) {
		caam_rng_exit(ctrldev);
		return ret;
	}

	devres_close_group(ctrldev, caam_rng_init);
	return 0;
}