// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define DRV_NAME	"octeontx-cptvf"
#define DRV_VERSION	"1.0"
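
/*
 * Completion handling is deferred to tasklet (bottom half) context:
 * the DONE interrupt handler only acknowledges the hardware and
 * schedules a per-queue tasklet, which then runs otx_cpt_post_process()
 * on the completed requests.
 */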
static void vq_work_handler(unsigned long data)
{
	struct otx_cptvf_wqe_info *cwqe_info =
					(struct otx_cptvf_wqe_info *) data;

	otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
}

static int init_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}
	cptvf->wqe_info = cwqe_info;

	return 0;
}

static void cleanup_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}
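
/*
 * Pending queues track requests that have been issued to the engine but
 * not yet completed. Each queue is a fixed-size array of entries used as
 * a ring via the front/rear indices and protected by its own spinlock.
 */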
static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
{
	struct otx_cpt_pending_queue *queue;
	int i;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kfree_sensitive((queue->head));
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = 0;
	}
	pqinfo->num_queues = 0;
}

static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
				u32 num_queues)
{
	struct otx_cpt_pending_queue *queue = NULL;
	size_t size;
	int ret;
	u32 i;

	pqinfo->num_queues = num_queues;
	size = (qlen * sizeof(struct otx_cpt_pending_entry));

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc((size), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->pending_count = 0;
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = qlen;

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}
	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}

static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
			       u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!num_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		return ret;
	}
	return 0;
}

static void cleanup_pending_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		cptvf->num_queues);
	free_pending_queues(&cptvf->pqinfo);
}

static void free_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo)
{
	struct otx_cpt_cmd_queue *queue = NULL;
	struct otx_cpt_cmd_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	/* clean up for each queue */
	for (i = 0; i < cptvf->num_queues; i++) {
		queue = &cqinfo->queue[i];

		while (!list_empty(&cqinfo->queue[i].chead)) {
			chunk = list_first_entry(&cqinfo->queue[i].chead,
					struct otx_cpt_cmd_chunk, nextchunk);

			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head,
					  chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			list_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}
		queue->num_chunks = 0;
		queue->idx = 0;
	}
}
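
/*
 * A command queue is built from DMA-coherent chunks linked into a
 * circular list: each chunk is followed by an OTX_CPT_NEXT_CHUNK_PTR_SIZE
 * pointer holding the DMA address of the next chunk, and the last chunk
 * points back to the first, so the engine can walk the whole queue from
 * the single start address programmed into VQX_SADDR.
 */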
static int alloc_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo,
				u32 qlen)
{
	struct otx_cpt_cmd_chunk *curr, *first, *last;
	struct otx_cpt_cmd_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	size_t q_size, c_size, rem_q_size;
	u32 qcsize_bytes;
	int i;

	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
		       OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * OTX_CPT_INST_SIZE;

	qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;

	/* per queue initialization */
	for (i = 0; i < cptvf->num_queues; i++) {
		c_size = 0;
		rem_q_size = q_size;
		first = NULL;
		last = NULL;

		queue = &cqinfo->queue[i];
		INIT_LIST_HEAD(&queue->chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
					   c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
					   &curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev,
				"Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->num_chunks);
				goto free_curr;
			}
			curr->size = c_size;

			if (queue->num_chunks == 0) {
				first = curr;
				queue->base  = first;
			}
			list_add_tail(&curr->nextchunk,
				      &cqinfo->queue[i].chead);

			queue->num_chunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
					(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/*
		 * Make the queue circular, tie back last chunk entry to head
		 */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
	}
	return 0;
free_curr:
	kfree(curr);
cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}

static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
			cptvf->num_queues);
		return ret;
	}
	return ret;
}

static void cleanup_command_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		cptvf->num_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}
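
/*
 * Software state is set up in dependency order: command queues first,
 * then pending queues, then the tasklets that drain them;
 * cptvf_sw_cleanup() above tears the same state down in reverse order.
 */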
static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = 0;
	int ret;

	max_dev_queues = OTX_CPT_NUM_QS_PER_VF;
	/* possible cpus */
	num_queues = min_t(u32, num_queues, max_dev_queues);
	cptvf->num_queues = num_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			num_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}
	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
{
	union otx_cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
	vqx_ctl.s.ena = val;
	writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
}
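
/*
 * The doorbell register counts 64-bit words rather than instructions,
 * so the number of newly queued instructions is scaled by 8 (one CPT
 * instruction is eight words) before being written.
 */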
void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
}

static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
{
	union otx_cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
	vqx_inprg.s.inflight = val;
	writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
}

static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.num_wait = val;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.num_wait;
}

static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.time_wait = time;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.time_wait;
}
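
/*
 * The interrupt enable registers below use write-1-to-set (W1S)
 * semantics and the MISC_INT status register is write-1-to-clear (W1C),
 * so each helper touches only the single bit it is responsible for.
 */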
static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable MBOX interrupt for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
	/* Enable DONE interrupt for the requested VF */
	vqx_done_ena.s.done = 1;
	writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
}

static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
{
	return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
					   void *arg)
{
	struct otx_cptvf *cptvf = arg;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		otx_cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		otx_cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev,
		"Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev,
		"Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev,
		"NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev,
			"Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}
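
/*
 * DONE interrupt path: hardware keeps a running count of completed
 * instructions in VQX_DONE. The hard IRQ handler reads the count,
 * acknowledges it through VQX_DONE_ACK and defers the actual response
 * processing to the queue's tasklet.
 */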
static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
						     int qno)
{
	struct otx_cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->num_queues))
		return NULL;
	nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done vqx_done;

	vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
					   u32 ackcnt)
{
	union otx_cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
}

static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
					   void *cptvf_dev)
{
	struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct otx_cptvf_wqe *wqe;

		/*
		 * Acknowledge the number of scheduled completions for
		 * processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev,
			"Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
{
	union otx_cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
}
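
/*
 * Bring up the virtual queue: quiesce it (disable, clear the doorbell
 * and in-flight counts), point VQX_SADDR at the first command chunk,
 * program the completion coalescing thresholds and re-enable it before
 * marking the VF ready.
 */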
static void cptvf_device_init(struct otx_cptvf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	otx_cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
	cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
}
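
/*
 * Per-VF sysfs attributes, registered by otx_cptvf_probe() under the
 * PCI device's sysfs directory: vf_type (RO), vf_engine_group (RW) and
 * the two interrupt coalescing knobs (RW). For example, with the device
 * path abbreviated, the engine group can be changed at runtime with:
 *
 *   echo 1 > /sys/bus/pci/devices/<BDF>/vf_engine_group
 *
 * where the value must be below OTX_CPT_MAX_ENGINE_GROUPS.
 */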
static ssize_t vf_type_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	char *msg;

	switch (cptvf->vftype) {
	case OTX_CPT_AE_TYPES:
		msg = "AE";
		break;

	case OTX_CPT_SE_TYPES:
		msg = "SE";
		break;

	default:
		msg = "Invalid";
	}

	return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
}

static ssize_t vf_engine_group_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
}

static ssize_t vf_engine_group_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	if (val < 0)
		return -EINVAL;

	if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Engine group >= max available groups %d\n",
			OTX_CPT_MAX_ENGINE_GROUPS);
		return -EINVAL;
	}

	ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
	if (ret)
		return ret;

	return count;
}

static ssize_t vf_coalesc_time_wait_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_timewait(cptvf));
}

static ssize_t vf_coalesc_num_wait_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_numwait(cptvf));
}

static ssize_t vf_coalesc_time_wait_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
	    val > OTX_CPT_COALESC_MAX_TIME_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_timewait(cptvf, val);
	return count;
}

static ssize_t vf_coalesc_num_wait_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
	    val > OTX_CPT_COALESC_MAX_NUM_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_numwait(cptvf, val);
	return count;
}

static DEVICE_ATTR_RO(vf_type);
static DEVICE_ATTR_RW(vf_engine_group);
static DEVICE_ATTR_RW(vf_coalesc_time_wait);
static DEVICE_ATTR_RW(vf_coalesc_num_wait);

static struct attribute *otx_cptvf_attrs[] = {
	&dev_attr_vf_type.attr,
	&dev_attr_vf_engine_group.attr,
	&dev_attr_vf_coalesc_time_wait.attr,
	&dev_attr_vf_coalesc_num_wait.attr,
	NULL
};

static const struct attribute_group otx_cptvf_sysfs_group = {
	.attrs = otx_cptvf_attrs,
};
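
/*
 * Probe: enable the PCI device, map the config BAR, allocate MSI-X
 * vectors and the MISC interrupt, handshake with the PF over the
 * mailbox, initialize the software queues and hardware VQ, request the
 * DONE interrupt, register the crypto algorithms and finally create the
 * sysfs group. The error labels unwind this sequence in reverse.
 */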
static int otx_cptvf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx_cptvf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto disable_device;
	}
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto release_regions;
	}

	/* Map the VF's configuration registers */
	cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
				    OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			OTX_CPT_VF_MSIX_VECTORS);
		goto unmap_region;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request misc irq\n");
		goto free_vectors;
	}

	/* Enable mailbox and software error interrupts */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check CPT PF status; get chip ID / device ID from PF if ready */
	err = otx_cptvf_check_pf_ready(cptvf);
	if (err)
		goto free_misc_irq;

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = otx_cptvf_send_vq_size_msg(cptvf);
	if (err)
		goto sw_cleanup;

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
	if (err)
		goto sw_cleanup;

	cptvf->priority = 1;
	err = otx_cptvf_send_vf_priority_msg(cptvf);
	if (err)
		goto sw_cleanup;

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request done irq\n");
		goto sw_cleanup;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = otx_cptvf_send_vf_up(cptvf);
	if (err)
		goto free_irq_affinity;

	/* Initialize algorithms and set ops */
	err = otx_cpt_crypto_init(pdev, THIS_MODULE,
		    cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
		    cptvf->vftype, 1, cptvf->num_vfs);
	if (err) {
		dev_err(dev, "Failed to register crypto algs\n");
		goto free_irq_affinity;
	}

	err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
	if (err) {
		dev_err(dev, "Creating sysfs entries failed\n");
		goto crypto_exit;
	}

	return 0;

crypto_exit:
	otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
sw_cleanup:
	cptvf_sw_cleanup(cptvf);
free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
unmap_region:
	pci_iounmap(pdev, cptvf->reg_base);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return err;
}

static void otx_cptvf_remove(struct pci_dev *pdev)
{
	struct otx_cptvf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (otx_cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
		otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		cptvf_sw_cleanup(cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		pci_iounmap(pdev, cptvf->reg_base);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

/* Supported devices */
static const struct pci_device_id otx_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, }  /* end of table */
};

static struct pci_driver otx_cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = otx_cptvf_id_table,
	.probe = otx_cptvf_probe,
	.remove = otx_cptvf_remove,
};

module_pci_driver(otx_cptvf_pci_driver);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);