// SPDX-License-Identifier: GPL-2.0
/*-
 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/version.h>
#include <linux/slab.h>

/**
 * These enum and macro definitions are copied from the
 * file rte_pci_dev_features.h
 */
enum rte_intr_mode {
	RTE_INTR_MODE_NONE = 0,
	RTE_INTR_MODE_LEGACY,
	RTE_INTR_MODE_MSI,
	RTE_INTR_MODE_MSIX
};
#define RTE_INTR_MODE_NONE_NAME "none"
#define RTE_INTR_MODE_LEGACY_NAME "legacy"
#define RTE_INTR_MODE_MSI_NAME "msi"
#define RTE_INTR_MODE_MSIX_NAME "msix"
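
/*
 * The *_NAME strings above are the values accepted by the "intr_mode"
 * module parameter; see igbuio_config_intr_mode() below.
 */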

#include "compat.h"

/**
 * A structure describing the private information for a uio device.
 */
struct rte_uio_pci_dev {
	struct uio_info info;		/* UIO driver info */
	struct pci_dev *pdev;		/* PCI device */
	enum rte_intr_mode mode;	/* selected interrupt mode */
	atomic_t refcnt;		/* open count of the uio device file */
};

static int wc_activate;
static char *intr_mode;
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;

/* sriov sysfs */
static ssize_t
show_max_vfs(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	return snprintf(buf, 10, "%u\n", dev_num_vf(dev));
}

static ssize_t
store_max_vfs(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	int err = 0;
	unsigned long max_vfs;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (kstrtoul(buf, 0, &max_vfs) != 0)
		return -EINVAL;

	if (max_vfs == 0)
		pci_disable_sriov(pdev);
	else if (pci_num_vf(pdev) == 0)
		err = pci_enable_sriov(pdev, max_vfs);
	else /* changing the VF count while VFs are enabled is not allowed */
		err = -EINVAL;

	return err ? err : count;
}

static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);
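
/*
 * Example usage from user space (device address is illustrative):
 *   echo 2 > /sys/bus/pci/devices/0000:01:00.0/max_vfs   # enable two VFs
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/max_vfs   # disable VFs
 */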

static struct attribute *dev_attrs[] = {
	&dev_attr_max_vfs.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};

#ifndef HAVE_PCI_MSI_MASK_IRQ
/*
 * Mask or unmask generation of MSI-X messages for the given vector.
 */
static void
igbuio_msix_mask_irq(struct msi_desc *desc, s32 state)
{
	u32 mask_bits = desc->masked;
	unsigned int offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (state != 0)
		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	else
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;

	if (mask_bits != desc->masked) {
		writel(mask_bits, desc->mask_base + offset);
		readl(desc->mask_base);	/* flush the posted write */
		desc->masked = mask_bits;
	}
}

/*
 * Mask or unmask generation of MSI messages for the given vector.
 */
static void
igbuio_msi_mask_irq(struct pci_dev *pdev, struct msi_desc *desc, int32_t state)
{
	u32 mask_bits = desc->masked;
	u32 offset = desc->irq - pdev->irq;
	u32 mask = 1 << offset;

	if (!desc->msi_attrib.maskbit)
		return;

	if (state != 0)
		mask_bits &= ~mask;
	else
		mask_bits |= mask;

	if (mask_bits != desc->masked) {
		pci_write_config_dword(pdev, desc->mask_pos, mask_bits);
		desc->masked = mask_bits;
	}
}

static void
igbuio_mask_irq(struct pci_dev *pdev, enum rte_intr_mode mode, s32 irq_state)
{
	struct msi_desc *desc;
	struct list_head *msi_list;

#ifdef HAVE_MSI_LIST_IN_GENERIC_DEVICE
	msi_list = &pdev->dev.msi_list;
#else
	msi_list = &pdev->msi_list;
#endif

	if (mode == RTE_INTR_MODE_MSIX) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msix_mask_irq(desc, irq_state);
	} else if (mode == RTE_INTR_MODE_MSI) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msi_mask_irq(pdev, desc, irq_state);
	}
}
#endif

/**
 * This is the irqcontrol callback to be registered to uio_info.
 * It can be used to enable or disable the interrupt from user space.
 *
 * @param info
 *  pointer to uio_info.
 * @param irq_state
 *  state value. 1 to enable interrupt, 0 to disable interrupt.
 *
 * @return
 *  - On success, 0.
 *  - On failure, a negative value.
 */
static int
igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *pdev = udev->pdev;

#ifdef HAVE_PCI_MSI_MASK_IRQ
	struct irq_data *irq = irq_get_irq_data(udev->info.irq);
#endif

	pci_cfg_access_lock(pdev);

	if (udev->mode == RTE_INTR_MODE_MSIX ||
	    udev->mode == RTE_INTR_MODE_MSI) {
#ifdef HAVE_PCI_MSI_MASK_IRQ
		if (irq_state == 1)
			pci_msi_unmask_irq(irq);
		else
			pci_msi_mask_irq(irq);
#else
		igbuio_mask_irq(pdev, udev->mode, irq_state);
#endif
	}

	if (udev->mode == RTE_INTR_MODE_LEGACY)
		pci_intx(pdev, !!irq_state);

	pci_cfg_access_unlock(pdev);

	return 0;
}
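
/*
 * User space reaches igbuio_pci_irqcontrol() by writing a 32-bit value
 * to the uio device file, e.g. (sketch, device path is illustrative):
 *
 *   int fd = open("/dev/uio0", O_RDWR);
 *   uint32_t enable = 1;
 *   write(fd, &enable, sizeof(enable));  // re-enable the interrupt
 */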

/**
 * Interrupt handler. It checks whether the interrupt belongs to this
 * device; if so, the interrupt is masked here and re-enabled later from
 * user space via the irqcontrol callback.
 */
static irqreturn_t
igbuio_pci_irqhandler(int irq, void *dev_id)
{
	struct rte_uio_pci_dev *udev = (struct rte_uio_pci_dev *)dev_id;
	struct uio_info *info = &udev->info;

	/* Legacy mode needs to mask the interrupt in hardware */
	if (udev->mode == RTE_INTR_MODE_LEGACY &&
	    !pci_check_and_mask_intx(udev->pdev))
		return IRQ_NONE;

	uio_event_notify(info);

	/* Message-signalled interrupts are not shared and are automasked */
	return IRQ_HANDLED;
}

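/*
 * Try the preferred interrupt mode first and fall through to the next
 * one on failure: MSI-X -> MSI -> legacy INTx -> none.
 */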
static int
igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
{
	int err = 0;
#ifndef HAVE_ALLOC_IRQ_VECTORS
	struct msix_entry msix_entry;
#endif

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 MSI-X vector needed */
#ifndef HAVE_ALLOC_IRQ_VECTORS
		msix_entry.entry = 0;
		if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif

		fallthrough;
	case RTE_INTR_MODE_MSI:
#ifndef HAVE_ALLOC_IRQ_VECTORS
		if (pci_enable_msi(udev->pdev) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#endif
		fallthrough;
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(udev->pdev)) {
			dev_dbg(&udev->pdev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");
		fallthrough;
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = UIO_IRQ_NONE;
		break;

	default:
		dev_err(&udev->pdev->dev, "invalid IRQ mode %u\n",
			igbuio_intr_mode_preferred);
		udev->info.irq = UIO_IRQ_NONE;
		err = -EINVAL;
	}

	if (err != 0)
		return err;

	if (udev->info.irq != UIO_IRQ_NONE) {
		err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
				  udev->info.irq_flags, udev->info.name,
				  udev);
		if (err != 0)
			return err;
	}

	dev_info(&udev->pdev->dev, "uio device registered with irq %ld\n",
		 udev->info.irq);

	return 0;
}

static void
igbuio_pci_disable_interrupts(struct rte_uio_pci_dev *udev)
{
	if (udev->info.irq) {
		free_irq(udev->info.irq, udev);
		udev->info.irq = 0;
	}

#ifndef HAVE_ALLOC_IRQ_VECTORS
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	if (udev->mode == RTE_INTR_MODE_MSI)
		pci_disable_msi(udev->pdev);
#else
	if (udev->mode == RTE_INTR_MODE_MSIX ||
	    udev->mode == RTE_INTR_MODE_MSI)
		pci_free_irq_vectors(udev->pdev);
#endif
}


/**
 * This gets called while opening uio device file. Only the first open
 * enables bus mastering and interrupts; later opens just take a reference.
 */
static int
igbuio_pci_open(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;
	int err;

	if (atomic_inc_return(&udev->refcnt) != 1)
		return 0;

	/* set bus master, which was cleared by the reset function */
	pci_set_master(dev);

	/* enable interrupts */
	err = igbuio_pci_enable_interrupts(udev);
	if (err) {
		atomic_dec(&udev->refcnt);
		dev_err(&dev->dev, "Enabling interrupts failed\n");
	}
	return err;
}

static int
igbuio_pci_release(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;

	if (atomic_dec_and_test(&udev->refcnt)) {
		/* disable interrupts */
		igbuio_pci_disable_interrupts(udev);

		/* stop the device from further DMA */
		pci_clear_master(dev);
	}

	return 0;
}

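/*
 * Descriptor and statistics layouts shared with user space through the
 * extra UIO maps set up in igbuio_setup_bars() ("TX", "RX", "type").
 * Presumably these mirror the r8168/igb hardware formats; the sizes here
 * must stay in sync with the dma_alloc_coherent() length used in
 * igbuio_pci_setup_iomem1().
 */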
#define NUM_TX_DESC 4096 /* Number of Tx descriptors */
#define NUM_RX_DESC 4096 /* Number of Rx descriptors */

struct Desc {
	u32 opts1;
	u32 opts2;
	u64 addr;
};

/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
	struct {
		__le64 buffer_addr; /* Address of descriptor's data buf */
		__le32 cmd_type_len;
		__le32 olinfo_status;
	} read;
	struct {
		__le64 rsvd; /* Reserved */
		__le32 nxtseq_seed;
		__le32 status;
	} wb;
};

struct uio_rtl8168_counters {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_errors;
	u32 rx_errors;
	u16 rx_missed;
	u16 align_errors;
	u32 tx_one_collision;
	u32 tx_multi_collision;
	u64 rx_unicast;
	u64 rx_broadcast;
	u32 rx_multicast;
	u16 tx_aborted;
	u16 tx_underrun;
};

/*
 * Allocate a DMA-coherent descriptor buffer and expose it to user space
 * as uio memory map n. Unlike igbuio_pci_setup_iomem(), this does not
 * remap a PCI BAR; the pci_bar argument is unused.
 *
 * Note: the kernel virtual address is deliberately not stored in
 * info->mem[n].internal_addr, since igbuio_pci_release_iomem() would
 * iounmap() it; the allocation is therefore not freed on remove.
 */
static int
igbuio_pci_setup_iomem1(struct pci_dev *dev, struct uio_info *info,
			int n, int pci_bar, const char *name)
{
	void *internal_addr;
	dma_addr_t phys_addr;

	if (n >= ARRAY_SIZE(info->mem))
		return -EINVAL;

	internal_addr = dma_alloc_coherent(&dev->dev,
					   (NUM_TX_DESC * 16 * 8),
					   &phys_addr, GFP_KERNEL);
	if (!internal_addr) {
		dev_err(&dev->dev, "%s: descriptor buffer allocation failed\n",
			__func__);
		return -ENOMEM;
	}

	info->mem[n].name = name;
	info->mem[n].addr = phys_addr;
	info->mem[n].size = NUM_TX_DESC * 16 * 8;
	info->mem[n].memtype = UIO_MEM_PHYS;

	dev_dbg(&dev->dev, "%s: name: %s, addr: %pad, len: %llu [%d]\n",
		__func__, name, &phys_addr,
		(unsigned long long)info->mem[n].size, n);

	return 0;
}

/* Remap PCI resources described by bar #pci_bar in uio resource n. */
static int
igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
		       int n, int pci_bar, const char *name)
{
	unsigned long addr, len;
	void *internal_addr;

	if (n >= ARRAY_SIZE(info->mem))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -EINVAL;
	if (wc_activate == 0) {
		internal_addr = ioremap(addr, len);
		if (internal_addr == NULL)
			return -ENOMEM;
	} else {
		/* Skip ioremap so user space can map the BAR write-combined */
		internal_addr = NULL;
	}
	info->mem[n].name = name;
	info->mem[n].addr = addr;
	info->mem[n].internal_addr = internal_addr;
	info->mem[n].size = len;
	info->mem[n].memtype = UIO_MEM_PHYS;

	dev_dbg(&dev->dev, "%s: name: %s, addr: %lx, len: %lu\n",
		__func__, name, addr, len);

	return 0;
}

/* Get PCI port io resources described by bar #pci_bar in uio resource n. */
static int
igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
			int n, int pci_bar, const char *name)
{
	unsigned long addr, len;

	if (n >= ARRAY_SIZE(info->port))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -EINVAL;

	info->port[n].name = name;
	info->port[n].start = addr;
	info->port[n].size = len;
	info->port[n].porttype = UIO_PORT_X86;

	return 0;
}
488
489 /* Unmap previously ioremap'd resources */
490 static void
igbuio_pci_release_iomem(struct uio_info * info)491 igbuio_pci_release_iomem(struct uio_info *info)
492 {
493 int i;
494
495 for (i = 0; i < MAX_UIO_MAPS; i++) {
496 if (info->mem[i].internal_addr)
497 iounmap(info->mem[i].internal_addr);
498 }
499 }

static int
igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
{
	int i, iom, iop, ret;
	unsigned long flags;
	static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
		"BAR0",
		"BAR1",
		"BAR2",
		"BAR3",
		"BAR4",
		"BAR5",
	};

	iom = 0;
	iop = 0;

	for (i = 0; i < ARRAY_SIZE(bar_names); i++) {
		if (pci_resource_len(dev, i) != 0 &&
		    pci_resource_start(dev, i) != 0) {
			flags = pci_resource_flags(dev, i);
			if (flags & IORESOURCE_MEM) {
				ret = igbuio_pci_setup_iomem(dev, info, iom,
							     i, bar_names[i]);
				if (ret != 0)
					return ret;
				iom++;
			} else if (flags & IORESOURCE_IO) {
				ret = igbuio_pci_setup_ioport(dev, info, iop,
							      i, bar_names[i]);
				if (ret != 0)
					return ret;
				iop++;
			}
		}
	}

	/* Extra maps for the DMA-coherent descriptor buffers */
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "TX");
	if (ret != 0)
		return ret;
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "RX");
	if (ret != 0)
		return ret;
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "type");
	if (ret != 0)
		return ret;
	iom++;
	/*
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "type_tx");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "type_rx");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCS");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCR");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCS1");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCR1");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCSD");
	iom++;
	ret = igbuio_pci_setup_iomem1(dev, info, iom, i, "DESCPD");
	*/

	return (iom != 0 || iop != 0) ? ret : -ENOENT;
}
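
/*
 * The maps registered above are exported by the UIO core in registration
 * order, e.g. /sys/class/uio/uioX/maps/map0 for the first memory BAR,
 * followed by the "TX", "RX" and "type" buffers.
 */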

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	dma_addr_t map_dma_addr;
	void *map_addr;
	int err;

#ifdef HAVE_PCI_IS_BRIDGE_API
	if (pci_is_bridge(dev)) {
		dev_warn(&dev->dev, "Ignoring PCI bridge device\n");
		return -ENODEV;
	}
#endif

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
	udev->info.open = igbuio_pci_open;
	udev->info.release = igbuio_pci_release;
	udev->info.priv = udev;
	udev->pdev = dev;
	atomic_set(&udev->refcnt, 0);

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);

	/*
	 * Do a harmless dma mapping to attach the device to the IOMMU
	 * identity mapping when the kernel boots with iommu=pt.
	 * This is not a problem if there is no IOMMU at all.
	 */
	map_addr = dma_alloc_coherent(&dev->dev, 1024, &map_dma_addr,
				      GFP_KERNEL);
	if (!map_addr) {
		dev_info(&dev->dev, "dma mapping failed\n");
	} else {
		memset(map_addr, 0, 1024);
		dev_info(&dev->dev, "mapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);

		dma_free_coherent(&dev->dev, 1024, map_addr, map_dma_addr);
		dev_info(&dev->dev, "unmapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);
	}

	return 0;

fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}

static void
igbuio_pci_remove(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	igbuio_pci_release(&udev->info, NULL);

	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	uio_unregister_device(&udev->info);
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}

static int
igbuio_config_intr_mode(char *intr_str)
{
	if (!intr_str) {
		pr_info("Use MSIX interrupt by default\n");
		return 0;
	}

	if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
		pr_info("Use MSIX interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_MSI_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSI;
		pr_info("Use MSI interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
		pr_info("Use legacy interrupt\n");
	} else {
		pr_err("Error: bad parameter - %s\n", intr_str);
		return -EINVAL;
	}

	return 0;
}

static struct pci_driver igbuio_pci_driver = {
	.name = "igb_uio",
	/* No static id table: devices are bound from user space via sysfs */
	.id_table = NULL,
	.probe = igbuio_pci_probe,
	.remove = igbuio_pci_remove,
};

static int __init
igbuio_pci_init_module(void)
{
	int ret;

	if (igbuio_kernel_is_locked_down()) {
		pr_err("Cannot use module: kernel lockdown is enabled\n");
		return -EINVAL;
	}

	if (wc_activate != 0)
		pr_info("wc_activate is set\n");

	ret = igbuio_config_intr_mode(intr_mode);
	if (ret < 0)
		return ret;

	return pci_register_driver(&igbuio_pci_driver);
}

static void __exit
igbuio_pci_exit_module(void)
{
	pci_unregister_driver(&igbuio_pci_driver);
}

module_init(igbuio_pci_init_module);
module_exit(igbuio_pci_exit_module);

module_param(intr_mode, charp, S_IRUGO);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
"    " RTE_INTR_MODE_MSIX_NAME "       Use MSIX interrupt\n"
"    " RTE_INTR_MODE_MSI_NAME "        Use MSI interrupt\n"
"    " RTE_INTR_MODE_LEGACY_NAME "     Use Legacy interrupt\n"
"\n");

module_param(wc_activate, int, 0);
MODULE_PARM_DESC(wc_activate,
"Activate support for write combining (WC) (default=0)\n"
"    0 - disable\n"
"    other - enable\n");
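
/*
 * Example (illustrative): load the module with MSI interrupts and
 * write combining enabled:
 *   insmod igb_uio.ko intr_mode=msi wc_activate=1
 */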

MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");