// SPDX-License-Identifier: GPL-2.0
/*-
 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>	/* dma_alloc_coherent(), used below */

/**
 * These enum and macro definitions are copied from the
 * file rte_pci_dev_features.h
 */
enum rte_intr_mode {
	RTE_INTR_MODE_NONE = 0,
	RTE_INTR_MODE_LEGACY,
	RTE_INTR_MODE_MSI,
	RTE_INTR_MODE_MSIX
};
#define RTE_INTR_MODE_NONE_NAME "none"
#define RTE_INTR_MODE_LEGACY_NAME "legacy"
#define RTE_INTR_MODE_MSI_NAME "msi"
#define RTE_INTR_MODE_MSIX_NAME "msix"

#include "compat.h"

/**
 * A structure describing the private information for a uio device.
 */
struct rte_uio_pci_dev {
	struct uio_info info;		/* UIO driver info */
	struct pci_dev *pdev;		/* PCI device */
	enum rte_intr_mode mode;	/* selected IRQ mode */
	atomic_t refcnt;		/* open()/release() reference count */
};

static int wc_activate;
static char *intr_mode;
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;

/* sriov sysfs */
static ssize_t
show_max_vfs(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	return snprintf(buf, 10, "%u\n", dev_num_vf(dev));
}

static ssize_t
store_max_vfs(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	int err = 0;
	unsigned long max_vfs;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &max_vfs) != 0)
		return -EINVAL;

	if (max_vfs == 0)
		pci_disable_sriov(pdev);
	else if (pci_num_vf(pdev) == 0)
		err = pci_enable_sriov(pdev, max_vfs);
	else /* changing max_vfs while VFs are enabled is not allowed */
		err = -EINVAL;

	return err ? err : count;
}

static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);

static struct attribute *dev_attrs[] = {
	&dev_attr_max_vfs.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
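
/*
 * Usage sketch (the BDF below is illustrative): once a device is bound to
 * this driver, VFs can be toggled through the max_vfs attribute:
 *
 *	echo 2 > /sys/bus/pci/devices/0000:01:00.0/max_vfs
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.0/max_vfs
 */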

#ifndef HAVE_PCI_MSI_MASK_IRQ
/*
 * Mask or unmask the given MSI-X vector: state != 0 enables generation of
 * MSI-X messages, state == 0 masks it.
 */
static void
igbuio_msix_mask_irq(struct msi_desc *desc, s32 state)
{
	u32 mask_bits = desc->masked;
	unsigned int offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (state != 0)
		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	else
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;

	if (mask_bits != desc->masked) {
		writel(mask_bits, desc->mask_base + offset);
		readl(desc->mask_base);
		desc->masked = mask_bits;
	}
}

/*
 * Mask or unmask the given MSI vector: state != 0 enables generation of
 * MSI messages, state == 0 masks it.
 */
static void
igbuio_msi_mask_irq(struct pci_dev *pdev, struct msi_desc *desc, int32_t state)
{
	u32 mask_bits = desc->masked;
	u32 offset = desc->irq - pdev->irq;
	u32 mask = 1 << offset;

	if (!desc->msi_attrib.maskbit)
		return;

	if (state != 0)
		mask_bits &= ~mask;
	else
		mask_bits |= mask;

	if (mask_bits != desc->masked) {
		pci_write_config_dword(pdev, desc->mask_pos, mask_bits);
		desc->masked = mask_bits;
	}
}

static void
igbuio_mask_irq(struct pci_dev *pdev, enum rte_intr_mode mode, s32 irq_state)
{
	struct msi_desc *desc;
	struct list_head *msi_list;

#ifdef HAVE_MSI_LIST_IN_GENERIC_DEVICE
	msi_list = &pdev->dev.msi_list;
#else
	msi_list = &pdev->msi_list;
#endif

	if (mode == RTE_INTR_MODE_MSIX) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msix_mask_irq(desc, irq_state);
	} else if (mode == RTE_INTR_MODE_MSI) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msi_mask_irq(pdev, desc, irq_state);
	}
}
#endif

/**
 * This is the irqcontrol callback to be registered to uio_info.
 * It can be used to disable/enable interrupts from user space processes.
 *
 * @param info
 *  pointer to uio_info.
 * @param irq_state
 *  state value. 1 to enable interrupt, 0 to disable interrupt.
 *
 * @return
 *  - On success, 0.
 *  - On failure, a negative value.
 */
static int
igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *pdev = udev->pdev;

#ifdef HAVE_PCI_MSI_MASK_IRQ
	struct irq_data *irq = irq_get_irq_data(udev->info.irq);
#endif

	pci_cfg_access_lock(pdev);

	if (udev->mode == RTE_INTR_MODE_MSIX || udev->mode == RTE_INTR_MODE_MSI) {
#ifdef HAVE_PCI_MSI_MASK_IRQ
		if (irq_state == 1)
			pci_msi_unmask_irq(irq);
		else
			pci_msi_mask_irq(irq);
#else
		igbuio_mask_irq(pdev, udev->mode, irq_state);
#endif
	}

	if (udev->mode == RTE_INTR_MODE_LEGACY)
		pci_intx(pdev, !!irq_state);

	pci_cfg_access_unlock(pdev);

	return 0;
}
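
/*
 * Usage sketch (user space, not part of this module): the irqcontrol
 * callback above runs when a process writes a 4-byte integer to the UIO
 * node. Assuming the device probed as /dev/uio0:
 *
 *	s32 irq_state = 1;
 *	int fd = open("/dev/uio0", O_RDWR);
 *	write(fd, &irq_state, sizeof(irq_state));  (1 = enable, 0 = disable)
 */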

/**
 * The interrupt handler. It checks whether the interrupt belongs to the
 * right device; if so, the interrupt is masked here and re-enabled later
 * from user space via the irqcontrol callback.
 */
static irqreturn_t
igbuio_pci_irqhandler(int irq, void *dev_id)
{
	struct rte_uio_pci_dev *udev = (struct rte_uio_pci_dev *)dev_id;
	struct uio_info *info = &udev->info;

	/* Legacy mode needs to mask the interrupt in hardware */
	if (udev->mode == RTE_INTR_MODE_LEGACY &&
	    !pci_check_and_mask_intx(udev->pdev))
		return IRQ_NONE;

	uio_event_notify(info);

	/* Message-signalled interrupts are not shared and are automasked */
	return IRQ_HANDLED;
}
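
/*
 * On the user-space side (illustrative), uio_event_notify() above completes
 * a blocking read() on the UIO node; the value read is the cumulative
 * interrupt count:
 *
 *	s32 nevents;
 *	read(fd, &nevents, sizeof(nevents));  (blocks until an IRQ fires)
 */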

static int
igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
{
	int err = 0;
#ifndef HAVE_ALLOC_IRQ_VECTORS
	struct msix_entry msix_entry;
#endif

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
#ifndef HAVE_ALLOC_IRQ_VECTORS
		msix_entry.entry = 0;
		if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif

	fallthrough;
	case RTE_INTR_MODE_MSI:
#ifndef HAVE_ALLOC_IRQ_VECTORS
		if (pci_enable_msi(udev->pdev) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#endif
	fallthrough;
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(udev->pdev)) {
			dev_dbg(&udev->pdev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");
	fallthrough;
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = UIO_IRQ_NONE;
		break;

	default:
		dev_err(&udev->pdev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		udev->info.irq = UIO_IRQ_NONE;
		err = -EINVAL;
	}

	if (udev->info.irq != UIO_IRQ_NONE)
		err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
				  udev->info.irq_flags, udev->info.name,
				  udev);
	dev_info(&udev->pdev->dev, "uio device registered with irq %ld\n",
		 udev->info.irq);

	return err;
}

static void
igbuio_pci_disable_interrupts(struct rte_uio_pci_dev *udev)
{
	if (udev->info.irq) {
		free_irq(udev->info.irq, udev);
		udev->info.irq = 0;
	}

#ifndef HAVE_ALLOC_IRQ_VECTORS
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	if (udev->mode == RTE_INTR_MODE_MSI)
		pci_disable_msi(udev->pdev);
#else
	if (udev->mode == RTE_INTR_MODE_MSIX ||
	    udev->mode == RTE_INTR_MODE_MSI)
		pci_free_irq_vectors(udev->pdev);
#endif
}


/**
 * This gets called while opening uio device file.
 */
static int
igbuio_pci_open(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;
	int err;

	if (atomic_inc_return(&udev->refcnt) != 1)
		return 0;

	/* set bus master, which was cleared by the reset function */
	pci_set_master(dev);

	/* enable interrupts */
	err = igbuio_pci_enable_interrupts(udev);
	if (err) {
		atomic_dec(&udev->refcnt);
		dev_err(&dev->dev, "Failed to enable interrupts\n");
	}
	return err;
}

static int
igbuio_pci_release(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;

	if (atomic_dec_and_test(&udev->refcnt)) {
		/* disable interrupts */
		igbuio_pci_disable_interrupts(udev);

		/* stop the device from further DMA */
		pci_clear_master(dev);
	}

	return 0;
}

#define NUM_TX_DESC 4096    /* Number of Tx descriptor registers */
#define NUM_RX_DESC 4096    /* Number of Rx descriptor registers */

struct Desc {
	u32 opts1;
	u32 opts2;
	u64 addr;
};

/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
	struct {
		__le64 buffer_addr;    /* Address of descriptor's data buf */
		__le32 cmd_type_len;
		__le32 olinfo_status;
	} read;
	struct {
		__le64 rsvd;       /* Reserved */
		__le32 nxtseq_seed;
		__le32 status;
	} wb;
};

struct uio_rtl8168_counters {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_errors;
	u32 rx_errors;
	u16 rx_missed;
	u16 align_errors;
	u32 tx_one_collision;
	u32 tx_multi_collision;
	u64 rx_unicast;
	u64 rx_broadcast;
	u32 rx_multicast;
	u16 tx_aborted;
	u16 tx_underrun;
};
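
/*
 * Note: the descriptor and counter layouts above are not referenced
 * elsewhere in this file; they presumably document the rtl8168/e1000-style
 * formats a user-space driver is expected to use with the DMA maps
 * exported below.
 */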

/*
 * Allocate a coherent DMA buffer and expose it to user space as uio
 * resource n (pci_bar is unused here). Used for the descriptor rings
 * exported in igbuio_setup_bars() below.
 */
static int
igbuio_pci_setup_iomem1(struct pci_dev *dev, struct uio_info *info,
		       int n, int pci_bar, const char *name)
{
	void *internal_addr;
	dma_addr_t phys_addr;

	if (n >= ARRAY_SIZE(info->mem))
		return -EINVAL;

	internal_addr = dma_alloc_coherent(&dev->dev,
		(NUM_TX_DESC * 16 * 8),
		&phys_addr, GFP_KERNEL);
	if (!internal_addr) {
		dev_err(&dev->dev, "%s: alloc desc array failed\n", __func__);
		return -ENOMEM;
	}

	info->mem[n].name = name;
	info->mem[n].addr = phys_addr;
	info->mem[n].size = NUM_TX_DESC * 16 * 8;
	info->mem[n].memtype = UIO_MEM_PHYS;

	dev_dbg(&dev->dev, "%s: name: %s, addr: %llx, len: %llu [%d]\n",
		__func__, name, (unsigned long long)phys_addr,
		(unsigned long long)info->mem[n].size, n);

	return 0;
}
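
/*
 * User-space side (illustrative): each uio map n is mmap'd through the UIO
 * node at offset n * page size. For example, if two memory BARs were
 * registered as maps 0 and 1, the "TX" region below is map 2:
 *
 *	void *ring = mmap(NULL, NUM_TX_DESC * 16 * 8, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 2 * getpagesize());
 */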

/* Remap pci resources described by bar #pci_bar in uio resource n. */
static int
igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
		       int n, int pci_bar, const char *name)
{
	unsigned long addr, len;
	void *internal_addr;

	if (n >= ARRAY_SIZE(info->mem))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -1;
	if (wc_activate == 0) {
		internal_addr = ioremap(addr, len);
		if (internal_addr == NULL)
			return -1;
	} else {
		internal_addr = NULL;
	}
	info->mem[n].name = name;
	info->mem[n].addr = addr;
	info->mem[n].internal_addr = internal_addr;
	info->mem[n].size = len;
	info->mem[n].memtype = UIO_MEM_PHYS;

	pr_debug("%s: name: %s, addr: %lx, len: %ld\n",
		 __func__, name, addr, len);

	return 0;
}

/* Get pci port io resources described by bar #pci_bar in uio resource n. */
static int
igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
		int n, int pci_bar, const char *name)
{
	unsigned long addr, len;

	if (n >= ARRAY_SIZE(info->port))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -EINVAL;

	info->port[n].name = name;
	info->port[n].start = addr;
	info->port[n].size = len;
	info->port[n].porttype = UIO_PORT_X86;

	return 0;
}

/* Unmap previously ioremap'd resources */
static void
igbuio_pci_release_iomem(struct uio_info *info)
{
	int i;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		if (info->mem[i].internal_addr)
			iounmap(info->mem[i].internal_addr);
	}
}
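
/*
 * Note: only ioremap'd BARs are undone here. The coherent buffers from
 * igbuio_pci_setup_iomem1() record no internal_addr, so they are never
 * released with dma_free_coherent() and leak when the device is removed.
 */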

static int
igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
{
	int i, iom, iop, ret;
	unsigned long flags;
	static const char *bar_names[PCI_STD_RESOURCE_END + 1]  = {
		"BAR0",
		"BAR1",
		"BAR2",
		"BAR3",
		"BAR4",
		"BAR5",
	};

	iom = 0;
	iop = 0;

	for (i = 0; i < ARRAY_SIZE(bar_names); i++) {
		if (pci_resource_len(dev, i) != 0 &&
				pci_resource_start(dev, i) != 0) {
			flags = pci_resource_flags(dev, i);
			if (flags & IORESOURCE_MEM) {
				ret = igbuio_pci_setup_iomem(dev, info, iom,
							     i, bar_names[i]);
				if (ret != 0)
					return ret;
				iom++;
			} else if (flags & IORESOURCE_IO) {
				ret = igbuio_pci_setup_ioport(dev, info, iop,
							      i, bar_names[i]);
				if (ret != 0)
					return ret;
				iop++;
			}
		}
	}

	/* Export extra DMA-backed maps ("TX", "RX", "type") after the BARs. */
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "TX");
	if (ret != 0)
		return ret;
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "RX");
	if (ret != 0)
		return ret;
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "type");
	/*
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "type_tx");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "type_rx");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCS");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCR");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCS1");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCR1");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCSD");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCPD");
	*/
	return (iom != 0 || iop != 0) ? ret : -ENOENT;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	dma_addr_t map_dma_addr;
	void *map_addr;
	int err;

#ifdef HAVE_PCI_IS_BRIDGE_API
	if (pci_is_bridge(dev)) {
		dev_warn(&dev->dev, "Ignoring PCI bridge device\n");
		return -ENODEV;
	}
#endif

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
	udev->info.open = igbuio_pci_open;
	udev->info.release = igbuio_pci_release;
	udev->info.priv = udev;
	udev->pdev = dev;
	atomic_set(&udev->refcnt, 0);

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);

	/*
	 * Do a harmless dma mapping to attach the device to the iommu
	 * identity mapping if the kernel boots with iommu=pt. Note this
	 * is harmless if there is no IOMMU at all.
	 */
	map_addr = dma_alloc_coherent(&dev->dev, 1024, &map_dma_addr,
			GFP_KERNEL);
	if (map_addr)
		memset(map_addr, 0, 1024);

	if (!map_addr)
		dev_info(&dev->dev, "dma mapping failed\n");
	else {
		dev_info(&dev->dev, "mapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);

		dma_free_coherent(&dev->dev, 1024, map_addr, map_dma_addr);
		dev_info(&dev->dev, "unmapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);
	}

	return 0;

fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}

static void
igbuio_pci_remove(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	igbuio_pci_release(&udev->info, NULL);

	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	uio_unregister_device(&udev->info);
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}

static int
igbuio_config_intr_mode(char *intr_str)
{
	if (!intr_str) {
		pr_info("Use MSIX interrupt by default\n");
		return 0;
	}

	if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
		pr_info("Use MSIX interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_MSI_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSI;
		pr_info("Use MSI interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
		pr_info("Use legacy interrupt\n");
	} else {
		pr_err("Error: bad parameter - %s\n", intr_str);
		return -EINVAL;
	}

	return 0;
}

static struct pci_driver igbuio_pci_driver = {
	.name = "igb_uio",
	.id_table = NULL,	/* no static ID table: devices are bound via sysfs */
	.probe = igbuio_pci_probe,
	.remove = igbuio_pci_remove,
};

static int __init
igbuio_pci_init_module(void)
{
	int ret;

	if (igbuio_kernel_is_locked_down()) {
		pr_err("Unable to use module: kernel lockdown is enabled\n");
		return -EINVAL;
	}

	if (wc_activate != 0)
		pr_info("wc_activate is set\n");

	ret = igbuio_config_intr_mode(intr_mode);
	if (ret < 0)
		return ret;

	return pci_register_driver(&igbuio_pci_driver);
}

static void __exit
igbuio_pci_exit_module(void)
{
	pci_unregister_driver(&igbuio_pci_driver);
}

module_init(igbuio_pci_init_module);
module_exit(igbuio_pci_exit_module);

module_param(intr_mode, charp, S_IRUGO);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
"    " RTE_INTR_MODE_MSIX_NAME "       Use MSIX interrupt\n"
"    " RTE_INTR_MODE_MSI_NAME "        Use MSI interrupt\n"
"    " RTE_INTR_MODE_LEGACY_NAME "     Use Legacy interrupt\n"
"\n");

module_param(wc_activate, int, 0);
MODULE_PARM_DESC(wc_activate,
"Activate support for write combining (WC) (default=0)\n"
"    0 - disable\n"
"    other - enable\n");

MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");