// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/misc/xillybus_pcie.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework using PCI Express.
 */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/pci.h>
12*4882a593Smuzhiyun #include <linux/slab.h>
13*4882a593Smuzhiyun #include "xillybus.h"
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun MODULE_DESCRIPTION("Xillybus driver for PCIe");
16*4882a593Smuzhiyun MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
17*4882a593Smuzhiyun MODULE_VERSION("1.06");
18*4882a593Smuzhiyun MODULE_ALIAS("xillybus_pcie");
19*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #define PCI_DEVICE_ID_XILLYBUS 0xebeb
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun #define PCI_VENDOR_ID_ACTEL 0x11aa
24*4882a593Smuzhiyun #define PCI_VENDOR_ID_LATTICE 0x1204
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun static const char xillyname[] = "xillybus_pcie";
27*4882a593Smuzhiyun
/*
 * All supported boards expose the same Xillybus device ID; the vendor ID
 * varies with the FPGA vendor the core was generated for. The table is
 * terminated by the mandatory all-zero sentinel entry.
 */
static const struct pci_device_id xillyids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)},
	{PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)},
	{PCI_DEVICE(PCI_VENDOR_ID_LATTICE, PCI_DEVICE_ID_XILLYBUS)},
	{ /* End: all zeroes */ }
};
35*4882a593Smuzhiyun
xilly_pci_direction(int direction)36*4882a593Smuzhiyun static int xilly_pci_direction(int direction)
37*4882a593Smuzhiyun {
38*4882a593Smuzhiyun switch (direction) {
39*4882a593Smuzhiyun case DMA_TO_DEVICE:
40*4882a593Smuzhiyun return PCI_DMA_TODEVICE;
41*4882a593Smuzhiyun case DMA_FROM_DEVICE:
42*4882a593Smuzhiyun return PCI_DMA_FROMDEVICE;
43*4882a593Smuzhiyun default:
44*4882a593Smuzhiyun return PCI_DMA_BIDIRECTIONAL;
45*4882a593Smuzhiyun }
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun
/*
 * Hand ownership of a DMA buffer back to the CPU before it reads the
 * data (PCI flavor of the core's hw_sync_sgl_for_cpu hook).
 */
static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep,
					      dma_addr_t dma_handle,
					      size_t size,
					      int direction)
{
	int pci_direction = xilly_pci_direction(direction);

	pci_dma_sync_single_for_cpu(ep->pdev, dma_handle, size, pci_direction);
}
58*4882a593Smuzhiyun
/*
 * Hand ownership of a DMA buffer back to the device after the CPU has
 * written it (PCI flavor of the core's hw_sync_sgl_for_device hook).
 */
static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep,
						 dma_addr_t dma_handle,
						 size_t size,
						 int direction)
{
	int pci_direction = xilly_pci_direction(direction);

	pci_dma_sync_single_for_device(ep->pdev, dma_handle, size,
				       pci_direction);
}
69*4882a593Smuzhiyun
xilly_pci_unmap(void * ptr)70*4882a593Smuzhiyun static void xilly_pci_unmap(void *ptr)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun struct xilly_mapping *data = ptr;
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun pci_unmap_single(data->device, data->dma_addr,
75*4882a593Smuzhiyun data->size, data->direction);
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun kfree(ptr);
78*4882a593Smuzhiyun }
79*4882a593Smuzhiyun
/*
 * Map either through the PCI DMA mapper or the non-PCI one. Behind the
 * scenes exactly the same functions are called with the same parameters,
 * but that can change.
 */
85*4882a593Smuzhiyun
/*
 * DMA-map a single kernel buffer for the device and register a devm
 * action so the mapping is torn down automatically when the device
 * goes away. On success the bus address is stored in *ret_dma_handle.
 *
 * Returns 0 on success, -ENOMEM if the tracking structure cannot be
 * allocated, -ENODEV if the DMA mapping itself fails, or the error
 * from devm_add_action_or_reset() (which also unmaps on failure).
 */
static int xilly_map_single_pci(struct xilly_endpoint *ep,
				void *ptr,
				size_t size,
				int direction,
				dma_addr_t *ret_dma_handle
	)
{
	struct xilly_mapping *mapping;
	dma_addr_t dma_addr;
	int pci_direction;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	pci_direction = xilly_pci_direction(direction);

	dma_addr = pci_map_single(ep->pdev, ptr, size, pci_direction);
	if (pci_dma_mapping_error(ep->pdev, dma_addr)) {
		kfree(mapping);
		return -ENODEV;
	}

	mapping->device = ep->pdev;
	mapping->dma_addr = dma_addr;
	mapping->size = size;
	mapping->direction = pci_direction;

	*ret_dma_handle = dma_addr;

	return devm_add_action_or_reset(ep->dev, xilly_pci_unmap, mapping);
}
119*4882a593Smuzhiyun
/*
 * DMA-related operations handed to the xillybus core for endpoints
 * reached over PCI Express.
 */
static struct xilly_endpoint_hardware pci_hw = {
	.owner = THIS_MODULE,
	.hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
	.hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
	.map_single = xilly_map_single_pci,
};
126*4882a593Smuzhiyun
/*
 * Probe a Xillybus PCIe device: allocate the endpoint structure, set up
 * the managed PCI resources (BAR 0 mapping, MSI interrupt, DMA mask)
 * and kick off discovery of the FPGA's channel configuration.
 *
 * All acquisitions go through pcim_*/devm_* helpers, so there is no
 * explicit unwinding on the error paths or in xilly_remove().
 */
static int xilly_probe(struct pci_dev *pdev,
		       const struct pci_device_id *ent)
{
	struct xilly_endpoint *endpoint;
	int rc;

	endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &pci_hw);

	if (!endpoint)
		return -ENOMEM;

	pci_set_drvdata(pdev, endpoint);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(endpoint->dev,
			"pcim_enable_device() failed. Aborting.\n");
		return rc;
	}

	/* L0s has caused packet drops. No power saving, thank you. */

	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

	/* The register interface is expected in a memory BAR, not I/O. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(endpoint->dev,
			"Incorrect BAR configuration. Aborting.\n");
		return -ENODEV;
	}

	/* Mask 0x01 = request and map BAR 0 only */
	rc = pcim_iomap_regions(pdev, 0x01, xillyname);
	if (rc) {
		dev_err(endpoint->dev,
			"pcim_iomap_regions() failed. Aborting.\n");
		return rc;
	}

	endpoint->registers = pcim_iomap_table(pdev)[0];

	pci_set_master(pdev);

	/* Set up a single MSI interrupt */
	if (pci_enable_msi(pdev)) {
		dev_err(endpoint->dev,
			"Failed to enable MSI interrupts. Aborting.\n");
		return -ENODEV;
	}
	rc = devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0,
			      xillyname, endpoint);
	if (rc) {
		dev_err(endpoint->dev,
			"Failed to register MSI handler. Aborting.\n");
		return -ENODEV;
	}

	/*
	 * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
	 * even when the PCIe driver claims that a 64-bit mask is OK. On the
	 * other hand, on some architectures, 64-bit addressing is mandatory.
	 * So go for the 64-bit mask only when failing is the other option.
	 */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		endpoint->dma_using_dac = 0;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		endpoint->dma_using_dac = 1;
	} else {
		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
		return -ENODEV;
	}

	return xillybus_endpoint_discovery(endpoint);
}
200*4882a593Smuzhiyun
/*
 * Tear down the endpoint on device removal. The PCI resources taken in
 * xilly_probe() are device-managed and released by the driver core.
 */
static void xilly_remove(struct pci_dev *pdev)
{
	xillybus_endpoint_remove(pci_get_drvdata(pdev));
}
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, xillyids);
209*4882a593Smuzhiyun
/* PCI driver glue: binds the ID table to the probe/remove callbacks. */
static struct pci_driver xillybus_driver = {
	.name = xillyname,
	.id_table = xillyids,
	.probe = xilly_probe,
	.remove = xilly_remove,
};
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun module_pci_driver(xillybus_driver);
218