xref: /OK3568_Linux_fs/kernel/drivers/crypto/cavium/nitrox/nitrox_lib.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <linux/cpumask.h>
3*4882a593Smuzhiyun #include <linux/dma-mapping.h>
4*4882a593Smuzhiyun #include <linux/dmapool.h>
5*4882a593Smuzhiyun #include <linux/delay.h>
6*4882a593Smuzhiyun #include <linux/gfp.h>
7*4882a593Smuzhiyun #include <linux/kernel.h>
8*4882a593Smuzhiyun #include <linux/module.h>
9*4882a593Smuzhiyun #include <linux/pci_regs.h>
10*4882a593Smuzhiyun #include <linux/vmalloc.h>
11*4882a593Smuzhiyun #include <linux/pci.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include "nitrox_dev.h"
14*4882a593Smuzhiyun #include "nitrox_common.h"
15*4882a593Smuzhiyun #include "nitrox_req.h"
16*4882a593Smuzhiyun #include "nitrox_csr.h"
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #define CRYPTO_CTX_SIZE	256
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun /* packet input ring alignments */
21*4882a593Smuzhiyun #define PKTIN_Q_ALIGN_BYTES 16
22*4882a593Smuzhiyun /* AQM Queue input alignments */
23*4882a593Smuzhiyun #define AQM_Q_ALIGN_BYTES 32
24*4882a593Smuzhiyun 
nitrox_cmdq_init(struct nitrox_cmdq * cmdq,int align_bytes)25*4882a593Smuzhiyun static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun 	struct nitrox_device *ndev = cmdq->ndev;
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun 	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
30*4882a593Smuzhiyun 	cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
31*4882a593Smuzhiyun 						&cmdq->unalign_dma,
32*4882a593Smuzhiyun 						GFP_KERNEL);
33*4882a593Smuzhiyun 	if (!cmdq->unalign_base)
34*4882a593Smuzhiyun 		return -ENOMEM;
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
37*4882a593Smuzhiyun 	cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
38*4882a593Smuzhiyun 	cmdq->write_idx = 0;
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	spin_lock_init(&cmdq->cmd_qlock);
41*4882a593Smuzhiyun 	spin_lock_init(&cmdq->resp_qlock);
42*4882a593Smuzhiyun 	spin_lock_init(&cmdq->backlog_qlock);
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 	INIT_LIST_HEAD(&cmdq->response_head);
45*4882a593Smuzhiyun 	INIT_LIST_HEAD(&cmdq->backlog_head);
46*4882a593Smuzhiyun 	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	atomic_set(&cmdq->pending_count, 0);
49*4882a593Smuzhiyun 	atomic_set(&cmdq->backlog_count, 0);
50*4882a593Smuzhiyun 	return 0;
51*4882a593Smuzhiyun }
52*4882a593Smuzhiyun 
nitrox_cmdq_reset(struct nitrox_cmdq * cmdq)53*4882a593Smuzhiyun static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun 	cmdq->write_idx = 0;
56*4882a593Smuzhiyun 	atomic_set(&cmdq->pending_count, 0);
57*4882a593Smuzhiyun 	atomic_set(&cmdq->backlog_count, 0);
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun 
nitrox_cmdq_cleanup(struct nitrox_cmdq * cmdq)60*4882a593Smuzhiyun static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
61*4882a593Smuzhiyun {
62*4882a593Smuzhiyun 	struct nitrox_device *ndev;
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 	if (!cmdq)
65*4882a593Smuzhiyun 		return;
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 	if (!cmdq->unalign_base)
68*4882a593Smuzhiyun 		return;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	ndev = cmdq->ndev;
71*4882a593Smuzhiyun 	cancel_work_sync(&cmdq->backlog_qflush);
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	dma_free_coherent(DEV(ndev), cmdq->qsize,
74*4882a593Smuzhiyun 			  cmdq->unalign_base, cmdq->unalign_dma);
75*4882a593Smuzhiyun 	nitrox_cmdq_reset(cmdq);
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	cmdq->dbell_csr_addr = NULL;
78*4882a593Smuzhiyun 	cmdq->compl_cnt_csr_addr = NULL;
79*4882a593Smuzhiyun 	cmdq->unalign_base = NULL;
80*4882a593Smuzhiyun 	cmdq->base = NULL;
81*4882a593Smuzhiyun 	cmdq->unalign_dma = 0;
82*4882a593Smuzhiyun 	cmdq->dma = 0;
83*4882a593Smuzhiyun 	cmdq->qsize = 0;
84*4882a593Smuzhiyun 	cmdq->instr_size = 0;
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun 
nitrox_free_aqm_queues(struct nitrox_device * ndev)87*4882a593Smuzhiyun static void nitrox_free_aqm_queues(struct nitrox_device *ndev)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun 	int i;
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	for (i = 0; i < ndev->nr_queues; i++) {
92*4882a593Smuzhiyun 		nitrox_cmdq_cleanup(ndev->aqmq[i]);
93*4882a593Smuzhiyun 		kfree_sensitive(ndev->aqmq[i]);
94*4882a593Smuzhiyun 		ndev->aqmq[i] = NULL;
95*4882a593Smuzhiyun 	}
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun 
nitrox_alloc_aqm_queues(struct nitrox_device * ndev)98*4882a593Smuzhiyun static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun 	int i, err;
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun 	for (i = 0; i < ndev->nr_queues; i++) {
103*4882a593Smuzhiyun 		struct nitrox_cmdq *cmdq;
104*4882a593Smuzhiyun 		u64 offset;
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 		cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
107*4882a593Smuzhiyun 		if (!cmdq) {
108*4882a593Smuzhiyun 			err = -ENOMEM;
109*4882a593Smuzhiyun 			goto aqmq_fail;
110*4882a593Smuzhiyun 		}
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 		cmdq->ndev = ndev;
113*4882a593Smuzhiyun 		cmdq->qno = i;
114*4882a593Smuzhiyun 		cmdq->instr_size = sizeof(struct aqmq_command_s);
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 		/* AQM Queue Doorbell Counter Register Address */
117*4882a593Smuzhiyun 		offset = AQMQ_DRBLX(i);
118*4882a593Smuzhiyun 		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
119*4882a593Smuzhiyun 		/* AQM Queue Commands Completed Count Register Address */
120*4882a593Smuzhiyun 		offset = AQMQ_CMD_CNTX(i);
121*4882a593Smuzhiyun 		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 		err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
124*4882a593Smuzhiyun 		if (err) {
125*4882a593Smuzhiyun 			kfree_sensitive(cmdq);
126*4882a593Smuzhiyun 			goto aqmq_fail;
127*4882a593Smuzhiyun 		}
128*4882a593Smuzhiyun 		ndev->aqmq[i] = cmdq;
129*4882a593Smuzhiyun 	}
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun 	return 0;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun aqmq_fail:
134*4882a593Smuzhiyun 	nitrox_free_aqm_queues(ndev);
135*4882a593Smuzhiyun 	return err;
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun 
nitrox_free_pktin_queues(struct nitrox_device * ndev)138*4882a593Smuzhiyun static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
139*4882a593Smuzhiyun {
140*4882a593Smuzhiyun 	int i;
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	for (i = 0; i < ndev->nr_queues; i++) {
143*4882a593Smuzhiyun 		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 		nitrox_cmdq_cleanup(cmdq);
146*4882a593Smuzhiyun 	}
147*4882a593Smuzhiyun 	kfree(ndev->pkt_inq);
148*4882a593Smuzhiyun 	ndev->pkt_inq = NULL;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun 
nitrox_alloc_pktin_queues(struct nitrox_device * ndev)151*4882a593Smuzhiyun static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun 	int i, err;
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
156*4882a593Smuzhiyun 				     sizeof(struct nitrox_cmdq),
157*4882a593Smuzhiyun 				     GFP_KERNEL, ndev->node);
158*4882a593Smuzhiyun 	if (!ndev->pkt_inq)
159*4882a593Smuzhiyun 		return -ENOMEM;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	for (i = 0; i < ndev->nr_queues; i++) {
162*4882a593Smuzhiyun 		struct nitrox_cmdq *cmdq;
163*4882a593Smuzhiyun 		u64 offset;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 		cmdq = &ndev->pkt_inq[i];
166*4882a593Smuzhiyun 		cmdq->ndev = ndev;
167*4882a593Smuzhiyun 		cmdq->qno = i;
168*4882a593Smuzhiyun 		cmdq->instr_size = sizeof(struct nps_pkt_instr);
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun 		/* packet input ring doorbell address */
171*4882a593Smuzhiyun 		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
172*4882a593Smuzhiyun 		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
173*4882a593Smuzhiyun 		/* packet solicit port completion count address */
174*4882a593Smuzhiyun 		offset = NPS_PKT_SLC_CNTSX(i);
175*4882a593Smuzhiyun 		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 		err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
178*4882a593Smuzhiyun 		if (err)
179*4882a593Smuzhiyun 			goto pktq_fail;
180*4882a593Smuzhiyun 	}
181*4882a593Smuzhiyun 	return 0;
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun pktq_fail:
184*4882a593Smuzhiyun 	nitrox_free_pktin_queues(ndev);
185*4882a593Smuzhiyun 	return err;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun 
create_crypto_dma_pool(struct nitrox_device * ndev)188*4882a593Smuzhiyun static int create_crypto_dma_pool(struct nitrox_device *ndev)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun 	size_t size;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	/* Crypto context pool, 16 byte aligned */
193*4882a593Smuzhiyun 	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
194*4882a593Smuzhiyun 	ndev->ctx_pool = dma_pool_create("nitrox-context",
195*4882a593Smuzhiyun 					 DEV(ndev), size, 16, 0);
196*4882a593Smuzhiyun 	if (!ndev->ctx_pool)
197*4882a593Smuzhiyun 		return -ENOMEM;
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	return 0;
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun 
destroy_crypto_dma_pool(struct nitrox_device * ndev)202*4882a593Smuzhiyun static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
203*4882a593Smuzhiyun {
204*4882a593Smuzhiyun 	if (!ndev->ctx_pool)
205*4882a593Smuzhiyun 		return;
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	dma_pool_destroy(ndev->ctx_pool);
208*4882a593Smuzhiyun 	ndev->ctx_pool = NULL;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun /*
212*4882a593Smuzhiyun  * crypto_alloc_context - Allocate crypto context from pool
213*4882a593Smuzhiyun  * @ndev: NITROX Device
214*4882a593Smuzhiyun  */
crypto_alloc_context(struct nitrox_device * ndev)215*4882a593Smuzhiyun void *crypto_alloc_context(struct nitrox_device *ndev)
216*4882a593Smuzhiyun {
217*4882a593Smuzhiyun 	struct ctx_hdr *ctx;
218*4882a593Smuzhiyun 	struct crypto_ctx_hdr *chdr;
219*4882a593Smuzhiyun 	void *vaddr;
220*4882a593Smuzhiyun 	dma_addr_t dma;
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 	chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
223*4882a593Smuzhiyun 	if (!chdr)
224*4882a593Smuzhiyun 		return NULL;
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
227*4882a593Smuzhiyun 	if (!vaddr) {
228*4882a593Smuzhiyun 		kfree(chdr);
229*4882a593Smuzhiyun 		return NULL;
230*4882a593Smuzhiyun 	}
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	/* fill meta data */
233*4882a593Smuzhiyun 	ctx = vaddr;
234*4882a593Smuzhiyun 	ctx->pool = ndev->ctx_pool;
235*4882a593Smuzhiyun 	ctx->dma = dma;
236*4882a593Smuzhiyun 	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	chdr->pool = ndev->ctx_pool;
239*4882a593Smuzhiyun 	chdr->dma = dma;
240*4882a593Smuzhiyun 	chdr->vaddr = vaddr;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	return chdr;
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun /**
246*4882a593Smuzhiyun  * crypto_free_context - Free crypto context to pool
247*4882a593Smuzhiyun  * @ctx: context to free
248*4882a593Smuzhiyun  */
crypto_free_context(void * ctx)249*4882a593Smuzhiyun void crypto_free_context(void *ctx)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun 	struct crypto_ctx_hdr *ctxp;
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	if (!ctx)
254*4882a593Smuzhiyun 		return;
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	ctxp = ctx;
257*4882a593Smuzhiyun 	dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
258*4882a593Smuzhiyun 	kfree(ctxp);
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun /**
262*4882a593Smuzhiyun  * nitrox_common_sw_init - allocate software resources.
263*4882a593Smuzhiyun  * @ndev: NITROX device
264*4882a593Smuzhiyun  *
265*4882a593Smuzhiyun  * Allocates crypto context pools and command queues etc.
266*4882a593Smuzhiyun  *
267*4882a593Smuzhiyun  * Return: 0 on success, or a negative error code on error.
268*4882a593Smuzhiyun  */
nitrox_common_sw_init(struct nitrox_device * ndev)269*4882a593Smuzhiyun int nitrox_common_sw_init(struct nitrox_device *ndev)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun 	int err = 0;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	/* per device crypto context pool */
274*4882a593Smuzhiyun 	err = create_crypto_dma_pool(ndev);
275*4882a593Smuzhiyun 	if (err)
276*4882a593Smuzhiyun 		return err;
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	err = nitrox_alloc_pktin_queues(ndev);
279*4882a593Smuzhiyun 	if (err)
280*4882a593Smuzhiyun 		destroy_crypto_dma_pool(ndev);
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	err = nitrox_alloc_aqm_queues(ndev);
283*4882a593Smuzhiyun 	if (err) {
284*4882a593Smuzhiyun 		nitrox_free_pktin_queues(ndev);
285*4882a593Smuzhiyun 		destroy_crypto_dma_pool(ndev);
286*4882a593Smuzhiyun 	}
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	return err;
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun /**
292*4882a593Smuzhiyun  * nitrox_common_sw_cleanup - free software resources.
293*4882a593Smuzhiyun  * @ndev: NITROX device
294*4882a593Smuzhiyun  */
nitrox_common_sw_cleanup(struct nitrox_device * ndev)295*4882a593Smuzhiyun void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
296*4882a593Smuzhiyun {
297*4882a593Smuzhiyun 	nitrox_free_aqm_queues(ndev);
298*4882a593Smuzhiyun 	nitrox_free_pktin_queues(ndev);
299*4882a593Smuzhiyun 	destroy_crypto_dma_pool(ndev);
300*4882a593Smuzhiyun }
301