xref: /OK3568_Linux_fs/kernel/drivers/crypto/cavium/nitrox/nitrox_sriov.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/delay.h>

#include "nitrox_dev.h"
#include "nitrox_hal.h"
#include "nitrox_common.h"
#include "nitrox_isr.h"
#include "nitrox_mbx.h"

/**
 * num_vfs_valid - validate VF count
 * @num_vfs: number of VF(s)
 */
static inline bool num_vfs_valid(int num_vfs)
{
	bool valid = false;

	switch (num_vfs) {
	case 16:
	case 32:
	case 64:
	case 128:
		valid = true;
		break;
	}

	return valid;
}

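/**
 * num_vfs_to_mode - map a VF count to the corresponding device mode
 * @num_vfs: number of VF(s); 0 selects PF mode
 */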
static inline enum vf_mode num_vfs_to_mode(int num_vfs)
{
	enum vf_mode mode = 0;

	switch (num_vfs) {
	case 0:
		mode = __NDEV_MODE_PF;
		break;
	case 16:
		mode = __NDEV_MODE_VF16;
		break;
	case 32:
		mode = __NDEV_MODE_VF32;
		break;
	case 64:
		mode = __NDEV_MODE_VF64;
		break;
	case 128:
		mode = __NDEV_MODE_VF128;
		break;
	}

	return mode;
}

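/**
 * vf_mode_to_nr_queues - number of queues available to each VF in a mode
 * @mode: PF/VF mode
 */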
static inline int vf_mode_to_nr_queues(enum vf_mode mode)
{
	int nr_queues = 0;

	switch (mode) {
	case __NDEV_MODE_PF:
		nr_queues = MAX_PF_QUEUES;
		break;
	case __NDEV_MODE_VF16:
		nr_queues = 8;
		break;
	case __NDEV_MODE_VF32:
		nr_queues = 4;
		break;
	case __NDEV_MODE_VF64:
		nr_queues = 2;
		break;
	case __NDEV_MODE_VF128:
		nr_queues = 1;
		break;
	}

	return nr_queues;
}

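/**
 * nitrox_pf_cleanup - release PF resources before switching to SR-IOV mode
 * @ndev: NITROX device
 */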
static void nitrox_pf_cleanup(struct nitrox_device *ndev)
{
	/* PF has no queues in SR-IOV mode */
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* unregister crypto algorithms */
	nitrox_crypto_unregister();

	/* cleanup PF resources */
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}

/**
 * nitrox_pf_reinit - re-initialize PF resources once SR-IOV is disabled
 * @ndev: NITROX device
 */
static int nitrox_pf_reinit(struct nitrox_device *ndev)
{
	int err;

	/* allocate resources for PF */
	err = nitrox_common_sw_init(ndev);
	if (err)
		return err;

	err = nitrox_register_interrupts(ndev);
	if (err) {
		nitrox_common_sw_cleanup(ndev);
		return err;
	}

	/* configure the AQM queues */
	nitrox_config_aqm_rings(ndev);

	/* configure the packet queues */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* set device to ready state */
	atomic_set(&ndev->state, __NDEV_READY);

	/* register crypto algorithms */
	return nitrox_crypto_register();
}

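/**
 * nitrox_sriov_cleanup - release PF resources used in SR-IOV mode
 * @ndev: NITROX device
 */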
static void nitrox_sriov_cleanup(struct nitrox_device *ndev)
{
	/* unregister interrupts for PF in SR-IOV */
	nitrox_sriov_unregister_interrupts(ndev);
	nitrox_mbox_cleanup(ndev);
}

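/**
 * nitrox_sriov_init - initialize PF resources used in SR-IOV mode
 * @ndev: NITROX device
 */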
static int nitrox_sriov_init(struct nitrox_device *ndev)
{
	int ret;

	/* register interrupts for PF in SR-IOV */
	ret = nitrox_sriov_register_interupts(ndev);
	if (ret)
		return ret;

	ret = nitrox_mbox_init(ndev);
	if (ret)
		goto sriov_init_fail;

	return 0;

sriov_init_fail:
	nitrox_sriov_cleanup(ndev);
	return ret;
}

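/**
 * nitrox_sriov_enable - enable SR-IOV and move the PF out of crypto mode
 * @pdev: PCI device
 * @num_vfs: number of VF(s) to enable (16, 32, 64 or 128)
 *
 * Returns @num_vfs on success, a negative error code otherwise.
 */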
static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);
	int err;

	if (!num_vfs_valid(num_vfs)) {
		dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
		return -EINVAL;
	}

	if (pci_num_vf(pdev) == num_vfs)
		return num_vfs;

	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err);
		return err;
	}
	dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);

	ndev->mode = num_vfs_to_mode(num_vfs);
	ndev->iov.num_vfs = num_vfs;
	ndev->iov.max_vf_queues = vf_mode_to_nr_queues(ndev->mode);
	/* set bit in flags */
	set_bit(__NDEV_SRIOV_BIT, &ndev->flags);

	/* cleanup PF resources */
	nitrox_pf_cleanup(ndev);

	/* PF SR-IOV mode initialization */
	err = nitrox_sriov_init(ndev);
	if (err)
		goto iov_fail;

	config_nps_core_vfcfg_mode(ndev, ndev->mode);
	return num_vfs;

iov_fail:
	pci_disable_sriov(pdev);
	/* clear bit in flags */
	clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
	ndev->iov.num_vfs = 0;
	ndev->mode = __NDEV_MODE_PF;
	/* reset back to working mode in PF */
	nitrox_pf_reinit(ndev);
	return err;
}

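/**
 * nitrox_sriov_disable - disable SR-IOV and restore PF crypto operation
 * @pdev: PCI device
 *
 * Returns 0 on success, a negative error code otherwise.
 */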
static int nitrox_sriov_disable(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags))
		return 0;

	if (pci_vfs_assigned(pdev)) {
		dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n");
		return -EPERM;
	}
	pci_disable_sriov(pdev);
	/* clear bit in flags */
	clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);

	ndev->iov.num_vfs = 0;
	ndev->iov.max_vf_queues = 0;
	ndev->mode = __NDEV_MODE_PF;

	/* cleanup PF SR-IOV resources */
	nitrox_sriov_cleanup(ndev);

	config_nps_core_vfcfg_mode(ndev, ndev->mode);

	return nitrox_pf_reinit(ndev);
}

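/**
 * nitrox_sriov_configure - enable or disable SR-IOV for the device
 * @pdev: PCI device
 * @num_vfs: requested number of VF(s); 0 disables SR-IOV
 *
 * Intended as the driver's .sriov_configure callback, which the PCI core
 * invokes when sriov_numvfs is written in sysfs.
 */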
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (!num_vfs)
		return nitrox_sriov_disable(pdev);

	return nitrox_sriov_enable(pdev, num_vfs);
}