// SPDX-License-Identifier: GPL-2.0
/*
 * CAAM/SEC 4.x QI transport/backend driver
 * Queue Interface backend functionality
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017, 2019-2020 NXP
 */

#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <soc/fsl/qman.h>

#include "debugfs.h"
#include "regs.h"
#include "qi.h"
#include "desc.h"
#include "intern.h"
#include "desc_constr.h"

#define PREHDR_RSLS_SHIFT	31
#define PREHDR_ABS		BIT(25)

/*
 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 * so that resources used by the in-flight buffers do not become a memory hog.
 */
#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
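
/*
 * For example, on a system with four affine portals this amounts to a
 * congestion threshold of 4 * 256 = 1024 in-flight frames; see init_cgr(),
 * which scales this per-CPU value by cpumask_weight(qman_affine_cpus()).
 */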

#define CAAM_QI_ENQUEUE_RETRIES	10000

#define CAAM_NAPI_WEIGHT	63

/*
 * caam_napi - struct holding CAAM NAPI-related params
 * @irqtask: IRQ task for QI backend
 * @p: QMan portal
 */
struct caam_napi {
	struct napi_struct irqtask;
	struct qman_portal *p;
};

/*
 * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
 *                     pending responses expected on each cpu.
 * @caam_napi: CAAM NAPI params
 * @net_dev: dummy netdev used by NAPI
 * @rsp_fq: response FQ from CAAM
 */
struct caam_qi_pcpu_priv {
	struct caam_napi caam_napi;
	struct net_device net_dev;
	struct qman_fq *rsp_fq;
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
static DEFINE_PER_CPU(int, last_cpu);

/*
 * caam_qi_priv - CAAM QI backend private params
 * @cgr: QMan congestion group
 */
struct caam_qi_priv {
	struct qman_cgr cgr;
};

static struct caam_qi_priv qipriv ____cacheline_aligned;

/*
 * This is written by only one core - the one that initialized the CGR - and
 * read by multiple cores (all the others).
 */
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);
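
/*
 * Frontends are expected to test caam_congested before enqueuing new work
 * and to back off while it is set. An illustrative sketch (the exact policy
 * is up to the caller):
 *
 *	if (unlikely(caam_congested))
 *		return -EAGAIN;
 */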

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 * doing malloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa-ethernet driver.
 *       This would pose a problem for userspace application processing which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;
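
/*
 * Illustrative use of the cache (a sketch, not code in this driver): callers
 * typically allocate per-request state from it on the hot path and release
 * it from their done callback:
 *
 *	edesc = qi_cache_alloc(GFP_ATOMIC);
 *	if (unlikely(!edesc))
 *		return -ENOMEM;
 *	...
 *	qi_cache_free(edesc);
 */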
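
/*
 * When the CAAM sits behind an IOMMU, the address carried in a frame
 * descriptor is an IOVA and must be translated back to a physical address
 * before phys_to_virt() can recover the kernel virtual address; without an
 * IOMMU domain the address is already physical.
 */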
static void *caam_iova_to_virt(struct iommu_domain *domain,
			       dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
	struct qm_fd fd;
	dma_addr_t addr;
	int ret;
	int num_retries = 0;

	qm_fd_clear_fd(&fd);
	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));

	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
			      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}
	qm_fd_addr_set64(&fd, addr);

	do {
		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
		if (likely(!ret)) {
			refcount_inc(&req->drv_ctx->refcnt);
			return 0;
		}

		if (ret != -EBUSY)
			break;
		num_retries++;
	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);

	dev_err(qidev, "qman_enqueue failed: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(caam_qi_enqueue);
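
/*
 * Sketch of a caller (my_done is a hypothetical callback): the user fills the
 * compound frame list in req->fd_sgt, sets the callback and driver context,
 * then enqueues. On success the callback later runs from the response path
 * with the CAAM status word:
 *
 *	req->cbk = my_done;
 *	req->drv_ctx = drv_ctx;
 *	ret = caam_qi_enqueue(qidev, req);
 *	if (ret)
 *		...undo mappings and bail out...
 */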

static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	const struct qm_fd *fd;
	struct caam_drv_req *drv_req;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	struct caam_drv_private *priv = dev_get_drvdata(qidev);

	fd = &msg->ern.fd;

	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
	if (!drv_req) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return;
	}

	refcount_dec(&drv_req->drv_ctx->refcnt);

	if (qm_fd_get_format(fd) != qm_fd_compound) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	if (fd->status)
		drv_req->cbk(drv_req, be32_to_cpu(fd->status));
	else
		drv_req->cbk(drv_req, JRSTA_SSRC_QI);
}

static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	opts.fqd.cgid = qipriv.cgr.cgrid;

	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
		smp_processor_id());
	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}

static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
{
	int ret;

	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
				    QMAN_VOLATILE_FLAG_FINISH,
				    QM_VDQCR_PRECEDENCE_VDQCR |
				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
	if (ret) {
		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
		return ret;
	}

	do {
		struct qman_portal *p;

		p = qman_get_affine_portal(smp_processor_id());
		qman_p_poll_dqrr(p, 16);
	} while (fq->flags & QMAN_FQ_STATE_NE);

	return 0;
}

static int kill_fq(struct device *qidev, struct qman_fq *fq)
{
	u32 flags;
	int ret;

	ret = qman_retire_fq(fq, &flags);
	if (ret < 0) {
		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
		return ret;
	}

	if (!ret)
		goto empty_fq;

	/* Async FQ retirement condition */
	if (ret == 1) {
		/* Retry until the FQ is in the retired state */
		do {
			msleep(20);
		} while (fq->state != qman_fq_state_retired);

		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
	}

empty_fq:
	if (fq->flags & QMAN_FQ_STATE_NE) {
		ret = empty_retired_fq(qidev, fq);
		if (ret) {
			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
				fq->fqid);
			return ret;
		}
	}

	ret = qman_oos_fq(fq);
	if (ret)
		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);

	qman_destroy_fq(fq);
	kfree(fq);

	return ret;
}

static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
{
	int ret;
	int retries = 10;
	struct qm_mcr_queryfq_np np;

	/* Wait until the older CAAM FQ gets empty */
	do {
		ret = qman_query_fq_np(fq, &np);
		if (ret)
			return ret;

		if (!qm_mcr_np_get(&np, frm_cnt))
			break;

		msleep(20);
	} while (1);

	/* Wait until pending jobs from this FQ are processed by CAAM */
	do {
		if (refcount_read(&drv_ctx->refcnt) == 1)
			break;

		msleep(20);
	} while (--retries);

	if (!retries)
		dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n",
			      refcount_read(&drv_ctx->refcnt), fq->fqid);

	return 0;
}

int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
		return -EINVAL;
	}

	/* Note down the older req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (IS_ERR(new_fq)) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return PTR_ERR(new_fq);
	}

	/* Hook up new FQ to context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the older FQ */
	ret = empty_caam_fq(old_fq, drv_ctx);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* We can revert to the older FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");

		return ret;
	}

	/*
	 * Re-initialise pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * We can kill the new FQ and revert to the old FQ.
		 * Since the descriptor is already modified, treat this as
		 * a success case.
		 */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ kill failed\n");
	}

	return 0;
}
EXPORT_SYMBOL(caam_drv_ctx_update);
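
/*
 * A minimal rekey sketch, assuming the caller has rebuilt its shared
 * descriptor (cnstr_shdsc_xxx() stands in for whatever builder applies):
 *
 *	cnstr_shdsc_xxx(sh_desc, ...);
 *	ret = caam_drv_ctx_update(drv_ctx, sh_desc);
 *
 * Requests already in flight drain on the old FQ before the new descriptor
 * takes effect, so callers need no additional quiescing of their own.
 */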

struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
				       int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
	 * and dma-map them.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (IS_ERR(drv_ctx->req_fq)) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	/* init reference counter used to track references to request FQ */
	refcount_set(&drv_ctx->refcnt, 1);

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
EXPORT_SYMBOL(caam_drv_ctx_init);
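
/*
 * Typical context lifecycle, as a sketch (the CPU is advisory; the function
 * may pick a different affine CPU and writes the choice back through *cpu):
 *
 *	int cpu = smp_processor_id();
 *	struct caam_drv_ctx *ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...submit requests via caam_qi_enqueue()...
 *	caam_drv_ctx_rel(ctx);
 */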

void *qi_cache_alloc(gfp_t flags)
{
	return kmem_cache_alloc(qi_cache, flags);
}
EXPORT_SYMBOL(qi_cache_alloc);

void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
EXPORT_SYMBOL(qi_cache_free);

static int caam_qi_poll(struct napi_struct *napi, int budget)
{
	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);

	int cleaned = qman_p_poll_dqrr(np->p, budget);

	if (cleaned < budget) {
		napi_complete(napi);
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	}

	return cleaned;
}

void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
{
	if (IS_ERR_OR_NULL(drv_ctx))
		return;

	/* Remove request FQ */
	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");

	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
			 DMA_BIDIRECTIONAL);
	kfree(drv_ctx);
}
EXPORT_SYMBOL(caam_drv_ctx_rel);

static void caam_qi_shutdown(void *data)
{
	int i;
	struct device *qidev = data;
	struct caam_qi_priv *priv = &qipriv;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	qman_delete_cgr_safe(&priv->cgr);
	qman_release_cgrid(priv->cgr.cgrid);

	kmem_cache_destroy(qi_cache);
}

static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
	caam_congested = congested;

	if (congested) {
		caam_debugfs_qi_congested();

		pr_debug_ratelimited("CAAM entered congestion\n");
	} else {
		pr_debug_ratelimited("CAAM exited congestion\n");
	}
}

static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
{
	/*
	 * In case of a threaded ISR, on RT kernels in_irq() does not return
	 * an appropriate value, so use in_serving_softirq() to distinguish
	 * between softirq and irq contexts.
	 */
	if (unlikely(in_irq() || !in_serving_softirq())) {
		/* Disable QMan IRQ source and invoke NAPI */
		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
		np->p = p;
		napi_schedule(&np->irqtask);
		return 1;
	}
	return 0;
}

static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
						    struct qman_fq *rsp_fq,
						    const struct qm_dqrr_entry *dqrr)
{
	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	struct caam_drv_private *priv = dev_get_drvdata(qidev);
	u32 status;

	if (caam_qi_napi_schedule(p, caam_napi))
		return qman_cb_dqrr_stop;

	fd = &dqrr->fd;

	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
	if (unlikely(!drv_req)) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return qman_cb_dqrr_consume;
	}

	refcount_dec(&drv_req->drv_ctx->refcnt);

	status = be32_to_cpu(fd->status);
	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
			dev_err_ratelimited(qidev,
					    "Error: %#x in CAAM response FD\n",
					    status);
	}

	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, status);
	return qman_cb_dqrr_consume;
}

static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
	struct qm_mcc_initfq opts;
	struct qman_fq *fq;
	int ret;

	fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
	if (!fq)
		return -ENOMEM;

	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (ret) {
		dev_err(qidev, "Rsp FQ create failed\n");
		kfree(fq);
		return -ENODEV;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
						QM_STASHING_EXCL_DATA;
	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);

	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret) {
		dev_err(qidev, "Rsp FQ init failed\n");
		kfree(fq);
		return -ENODEV;
	}

	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;

	dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
	return 0;
}

static int init_cgr(struct device *qidev)
{
	int ret;
	struct qm_mcc_initcgr opts;
	const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
			MAX_RSP_FQ_BACKLOG_PER_CPU;

	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
	if (ret) {
		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
		return ret;
	}

	qipriv.cgr.cb = cgr_cb;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
				   QM_CGR_WE_MODE);
	opts.cgr.cscn_en = QM_CGR_EN;
	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);

	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret) {
		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
			qipriv.cgr.cgrid);
		return ret;
	}

	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
	return 0;
}

static int alloc_rsp_fqs(struct device *qidev)
{
	int ret, i;
	const cpumask_t *cpus = qman_affine_cpus();

	/* Now create the response FQs */
	for_each_cpu(i, cpus) {
		ret = alloc_rsp_fq_cpu(qidev, i);
		if (ret) {
			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
			return ret;
		}
	}

	return 0;
}

static void free_rsp_fqs(void)
{
	int i;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus)
		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}

int caam_qi_init(struct platform_device *caam_pdev)
{
	int err, i;
	struct device *ctrldev = &caam_pdev->dev, *qidev;
	struct caam_drv_private *ctrlpriv;
	const cpumask_t *cpus = qman_affine_cpus();

	ctrlpriv = dev_get_drvdata(ctrldev);
	qidev = ctrldev;

	/* Initialize the congestion detection */
	err = init_cgr(qidev);
	if (err) {
		dev_err(qidev, "CGR initialization failed: %d\n", err);
		return err;
	}

	/* Initialise response FQs */
	err = alloc_rsp_fqs(qidev);
	if (err) {
		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
		free_rsp_fqs();
		return err;
	}

	/* Enable the NAPI contexts on each core that has an affine portal */
	for_each_cpu(i, cpus) {
		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
		struct caam_napi *caam_napi = &priv->caam_napi;
		struct napi_struct *irqtask = &caam_napi->irqtask;
		struct net_device *net_dev = &priv->net_dev;

		net_dev->dev = *qidev;
		INIT_LIST_HEAD(&net_dev->napi_list);

		netif_napi_add(net_dev, irqtask, caam_qi_poll,
			       CAAM_NAPI_WEIGHT);

		napi_enable(irqtask);
	}

	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
				     SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(qidev, "Can't allocate CAAM cache\n");
		free_rsp_fqs();
		return -ENOMEM;
	}

	caam_debugfs_qi_init(ctrlpriv);

	/* caam_qi_shutdown() expects the device pointer as its argument */
	err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
	if (err)
		return err;

	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
	return 0;
}