1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * PCIe endpoint driver for Renesas R-Car SoCs
4*4882a593Smuzhiyun * Copyright (c) 2020 Renesas Electronics Europe GmbH
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/delay.h>
11*4882a593Smuzhiyun #include <linux/of_address.h>
12*4882a593Smuzhiyun #include <linux/of_irq.h>
13*4882a593Smuzhiyun #include <linux/of_pci.h>
14*4882a593Smuzhiyun #include <linux/of_platform.h>
15*4882a593Smuzhiyun #include <linux/pci.h>
16*4882a593Smuzhiyun #include <linux/pci-epc.h>
17*4882a593Smuzhiyun #include <linux/phy/phy.h>
18*4882a593Smuzhiyun #include <linux/platform_device.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #include "pcie-rcar.h"
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #define RCAR_EPC_MAX_FUNCTIONS 1
23*4882a593Smuzhiyun
/* Structure representing the PCIe interface */
struct rcar_pcie_endpoint {
	struct rcar_pcie pcie;				/* common R-Car PCIe core state (regs, dev) */
	phys_addr_t *ob_mapped_addr;			/* per-outbound-window mapped CPU address (0 = unmapped) */
	struct pci_epc_mem_window *ob_window;		/* outbound windows parsed from "memoryN" resources */
	u8 max_functions;				/* clamped to RCAR_EPC_MAX_FUNCTIONS in get_pdata() */
	unsigned int bar_to_atu[MAX_NR_INBOUND_MAPS];	/* BAR number -> inbound window index */
	unsigned long *ib_window_map;			/* bitmap of in-use inbound windows */
	u32 num_ib_windows;				/* total inbound windows */
	u32 num_ob_windows;				/* total outbound windows */
};
35*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_hw_init() - switch the controller to endpoint mode and
 * program the default configuration-space capabilities.
 *
 * The register writes below are order-sensitive: the core is halted first,
 * then the mode is selected, then the capability fields are populated.
 */
static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
{
	u32 val;

	/* Halt any configuration/transfer activity before reprogramming. */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set endpoint mode */
	rcar_pci_write_reg(pcie, 0, PCIEMSR);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		   PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		   PCI_HEADER_TYPE_NORMAL);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	val = rcar_pci_read_reg(pcie, EXPCAP(1));
	/* device supports fixed 128 bytes MPSS */
	val &= ~GENMASK(2, 0);
	rcar_pci_write_reg(pcie, val, EXPCAP(1));

	val = rcar_pci_read_reg(pcie, EXPCAP(2));
	/* read requests size 128 bytes */
	val &= ~GENMASK(14, 12);
	/* payload size 128 bytes */
	val &= ~GENMASK(7, 5);
	rcar_pci_write_reg(pcie, val, EXPCAP(2));

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* flush modifications */
	wmb();
}
80*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_get_window() - look up the outbound window whose physical
 * base equals @addr.
 *
 * Return: the window index, or -EINVAL if no window matches.
 */
static int rcar_pcie_ep_get_window(struct rcar_pcie_endpoint *ep,
				   phys_addr_t addr)
{
	int idx;

	for (idx = 0; idx < ep->num_ob_windows; idx++) {
		if (ep->ob_window[idx].phys_base == addr)
			return idx;
	}

	return -EINVAL;
}
92*4882a593Smuzhiyun
rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint * ep,struct platform_device * pdev)93*4882a593Smuzhiyun static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
94*4882a593Smuzhiyun struct platform_device *pdev)
95*4882a593Smuzhiyun {
96*4882a593Smuzhiyun struct rcar_pcie *pcie = &ep->pcie;
97*4882a593Smuzhiyun char outbound_name[10];
98*4882a593Smuzhiyun struct resource *res;
99*4882a593Smuzhiyun unsigned int i = 0;
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun ep->num_ob_windows = 0;
102*4882a593Smuzhiyun for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
103*4882a593Smuzhiyun sprintf(outbound_name, "memory%u", i);
104*4882a593Smuzhiyun res = platform_get_resource_byname(pdev,
105*4882a593Smuzhiyun IORESOURCE_MEM,
106*4882a593Smuzhiyun outbound_name);
107*4882a593Smuzhiyun if (!res) {
108*4882a593Smuzhiyun dev_err(pcie->dev, "missing outbound window %u\n", i);
109*4882a593Smuzhiyun return -EINVAL;
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun if (!devm_request_mem_region(&pdev->dev, res->start,
112*4882a593Smuzhiyun resource_size(res),
113*4882a593Smuzhiyun outbound_name)) {
114*4882a593Smuzhiyun dev_err(pcie->dev, "Cannot request memory region %s.\n",
115*4882a593Smuzhiyun outbound_name);
116*4882a593Smuzhiyun return -EIO;
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun ep->ob_window[i].phys_base = res->start;
120*4882a593Smuzhiyun ep->ob_window[i].size = resource_size(res);
121*4882a593Smuzhiyun /* controller doesn't support multiple allocation
122*4882a593Smuzhiyun * from same window, so set page_size to window size
123*4882a593Smuzhiyun */
124*4882a593Smuzhiyun ep->ob_window[i].page_size = resource_size(res);
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun ep->num_ob_windows = i;
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun return 0;
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun
rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint * ep,struct platform_device * pdev)131*4882a593Smuzhiyun static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
132*4882a593Smuzhiyun struct platform_device *pdev)
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun struct rcar_pcie *pcie = &ep->pcie;
135*4882a593Smuzhiyun struct pci_epc_mem_window *window;
136*4882a593Smuzhiyun struct device *dev = pcie->dev;
137*4882a593Smuzhiyun struct resource res;
138*4882a593Smuzhiyun int err;
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun err = of_address_to_resource(dev->of_node, 0, &res);
141*4882a593Smuzhiyun if (err)
142*4882a593Smuzhiyun return err;
143*4882a593Smuzhiyun pcie->base = devm_ioremap_resource(dev, &res);
144*4882a593Smuzhiyun if (IS_ERR(pcie->base))
145*4882a593Smuzhiyun return PTR_ERR(pcie->base);
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun ep->ob_window = devm_kcalloc(dev, RCAR_PCI_MAX_RESOURCES,
148*4882a593Smuzhiyun sizeof(*window), GFP_KERNEL);
149*4882a593Smuzhiyun if (!ep->ob_window)
150*4882a593Smuzhiyun return -ENOMEM;
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun rcar_pcie_parse_outbound_ranges(ep, pdev);
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun err = of_property_read_u8(dev->of_node, "max-functions",
155*4882a593Smuzhiyun &ep->max_functions);
156*4882a593Smuzhiyun if (err < 0 || ep->max_functions > RCAR_EPC_MAX_FUNCTIONS)
157*4882a593Smuzhiyun ep->max_functions = RCAR_EPC_MAX_FUNCTIONS;
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun return 0;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_write_header() - program the standard configuration header
 * (IDs, class codes, interrupt pin) for function @fn.
 *
 * Function 0 establishes the vendor/subsystem-vendor IDs; other functions
 * must preserve what is already programmed, so their values are read back
 * from the registers.
 *
 * Return: 0 on success, -EINVAL if the header requests a pin beyond INTA.
 */
static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
				     struct pci_epf_header *hdr)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 reg;

	reg = fn ? rcar_pci_read_reg(pcie, IDSETR0) : hdr->vendorid;
	reg |= hdr->deviceid << 16;
	rcar_pci_write_reg(pcie, reg, IDSETR0);

	reg = hdr->revid | (hdr->progif_code << 8) |
	      (hdr->subclass_code << 16) | (hdr->baseclass_code << 24);
	rcar_pci_write_reg(pcie, reg, IDSETR1);

	reg = fn ? rcar_pci_read_reg(pcie, SUBIDSETR) : hdr->subsys_vendor_id;
	reg |= hdr->subsys_id << 16;
	rcar_pci_write_reg(pcie, reg, SUBIDSETR);

	/* Only INTA (or no pin) is supported. */
	if (hdr->interrupt_pin > PCI_INTERRUPT_INTA)
		return -EINVAL;

	reg = rcar_pci_read_reg(pcie, PCICONF(15));
	reg |= hdr->interrupt_pin << 8;
	rcar_pci_write_reg(pcie, reg, PCICONF(15));

	return 0;
}
197*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_set_bar() - program an inbound window backing @epf_bar.
 *
 * Reserves a pair of inbound windows (two entries form one 64-bit BAR),
 * limits the window size to the alignment of the CPU address and to 4 GiB,
 * and programs the inbound translation.
 *
 * Fix: on PHY-ready timeout the previously reserved window bits are now
 * released again; before, they leaked and the windows became unusable.
 *
 * Return: 0 on success, -EINVAL if no window is free or the PHY does not
 * become ready.
 */
static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
				struct pci_epf_bar *epf_bar)
{
	int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	u64 size = 1ULL << fls64(epf_bar->size - 1);
	dma_addr_t cpu_addr = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	struct rcar_pcie *pcie = &ep->pcie;
	u32 mask;
	int idx;
	int err;

	idx = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
	if (idx >= ep->num_ib_windows) {
		dev_err(pcie->dev, "no free inbound window\n");
		return -EINVAL;
	}

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
		flags |= IO_SPACE;

	ep->bar_to_atu[bar] = idx;
	/* use 64-bit BARs */
	set_bit(idx, ep->ib_window_map);
	set_bit(idx + 1, ep->ib_window_map);

	if (cpu_addr > 0) {
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;

		/* window cannot exceed the CPU address alignment */
		size = min(size, alignment);
	}

	size = min(size, 1ULL << 32);

	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;

	rcar_pcie_set_inbound(pcie, cpu_addr,
			      0x0, mask | flags, idx, false);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err) {
		dev_err(pcie->dev, "phy not ready\n");
		/* roll back the reservation so the window pair can be reused */
		clear_bit(idx, ep->ib_window_map);
		clear_bit(idx + 1, ep->ib_window_map);
		return -EINVAL;
	}

	return 0;
}
248*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_clear_bar() - disable the inbound translation for @epf_bar
 * and release the window pair reserved by rcar_pcie_ep_set_bar().
 */
static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
				   struct pci_epf_bar *epf_bar)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	enum pci_barno bar = epf_bar->barno;
	u32 atu_index = ep->bar_to_atu[bar];

	/* Zero address/mask/flags disable the inbound window for this BAR. */
	rcar_pcie_set_inbound(&ep->pcie, 0x0, 0x0, 0x0, bar, false);

	/* set_bar() reserved two consecutive windows for a 64-bit BAR. */
	clear_bit(atu_index, ep->ib_window_map);
	clear_bit(atu_index + 1, ep->ib_window_map);
}
261*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_set_msi() - advertise the number of MSI vectors by writing
 * the Multiple Message Capable field of function @fn's MSI capability.
 *
 * Return: always 0.
 */
static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 cap;

	cap = rcar_pci_read_reg(pcie, MSICAP(fn));
	cap |= interrupts << MSICAP0_MMESCAP_OFFSET;
	rcar_pci_write_reg(pcie, cap, MSICAP(fn));

	return 0;
}
274*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_get_msi() - read back the Multiple Message Enable field
 * granted by the host for function @fn.
 *
 * Return: the MME encoding, or -EINVAL if the host has not enabled MSI.
 */
static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	u32 cap = rcar_pci_read_reg(pcie, MSICAP(fn));

	/* MME is only meaningful once the host sets the MSI enable bit. */
	if (!(cap & MSICAP0_MSIE))
		return -EINVAL;

	return (cap & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET;
}
287*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_map_addr() - map the outbound window at CPU address @addr
 * onto PCI bus address @pci_addr for @size bytes.
 *
 * Return: 0 on success, a negative errno if the link is down or @addr does
 * not correspond to a known outbound window.
 */
static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct rcar_pcie *pcie = &ep->pcie;
	struct resource_entry win;
	struct resource res;
	int window;
	int err;

	/* Outbound traffic is only valid once the data link is up. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err) {
		dev_err(pcie->dev, "link not up\n");
		return err;
	}

	window = rcar_pcie_ep_get_window(ep, addr);
	if (window < 0) {
		dev_err(pcie->dev, "failed to get corresponding window\n");
		return -EINVAL;
	}

	/* Describe the PCI-side target range as a zeroed resource entry. */
	memset(&win, 0x0, sizeof(win));
	memset(&res, 0x0, sizeof(res));
	res.start = pci_addr;
	res.end = pci_addr + size - 1;
	res.flags = IORESOURCE_MEM;
	win.res = &res;

	rcar_pcie_set_outbound(pcie, window, &win);

	/* Remember the mapping so unmap_addr() can find this window. */
	ep->ob_mapped_addr[window] = addr;

	return 0;
}
324*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_unmap_addr() - tear down the outbound mapping previously
 * established for CPU address @addr; silently ignores unknown addresses.
 */
static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
				    phys_addr_t addr)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
	struct resource_entry win;
	struct resource res;
	int idx = 0;

	/* Find the window that map_addr() recorded for this CPU address. */
	while (idx < ep->num_ob_windows && ep->ob_mapped_addr[idx] != addr)
		idx++;

	if (idx >= ep->num_ob_windows)
		return;

	/* Program an all-zero range to disable the window. */
	memset(&win, 0x0, sizeof(win));
	memset(&res, 0x0, sizeof(res));
	win.res = &res;
	rcar_pcie_set_outbound(&ep->pcie, idx, &win);

	ep->ob_mapped_addr[idx] = 0;
}
347*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_assert_intx() - generate a legacy INTx pulse.
 *
 * Refuses to assert when MSI is enabled, when INTx emission is disabled in
 * config space, or when INTx is already asserted.  The pulse is produced by
 * setting ASTINTX, sleeping ~1 ms, then clearing it; the sleep length is
 * part of the observed behavior, so the sequence must not be reordered.
 *
 * Return: 0 on success, -EINVAL on any of the precondition failures above.
 */
static int rcar_pcie_ep_assert_intx(struct rcar_pcie_endpoint *ep,
				    u8 fn, u8 intx)
{
	struct rcar_pcie *pcie = &ep->pcie;
	u32 val;

	/* INTx and MSI are mutually exclusive. */
	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
	if ((val & PCI_MSI_FLAGS_ENABLE)) {
		dev_err(pcie->dev, "MSI is enabled, cannot assert INTx\n");
		return -EINVAL;
	}

	/* Host may have set the Interrupt Disable bit in the command register. */
	val = rcar_pci_read_reg(pcie, PCICONF(1));
	if ((val & INTDIS)) {
		dev_err(pcie->dev, "INTx message transmission is disabled\n");
		return -EINVAL;
	}

	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	if ((val & ASTINTX)) {
		dev_err(pcie->dev, "INTx is already asserted\n");
		return -EINVAL;
	}

	/* Assert, hold for ~1 ms, then deassert to form the pulse. */
	val |= ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);
	usleep_range(1000, 1001);
	val = rcar_pci_read_reg(pcie, PCIEINTXR);
	val &= ~ASTINTX;
	rcar_pci_write_reg(pcie, val, PCIEINTXR);

	return 0;
}
381*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_assert_msi() - fire MSI vector @interrupt_num (1-based) on
 * behalf of function @fn.
 *
 * Return: 0 on success, -EINVAL if the host has not enabled MSI or the
 * vector number exceeds the granted count.
 */
static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
				   u8 fn, u8 interrupt_num)
{
	u16 max_msgs;
	u32 val;

	/* Check MSI enable bit */
	val = rcar_pci_read_reg(pcie, MSICAP(fn));
	if (!(val & MSICAP0_MSIE))
		return -EINVAL;

	/* MME encodes log2 of the number of vectors granted by the host. */
	max_msgs = 1 << ((val & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
	if (!interrupt_num || interrupt_num > max_msgs)
		return -EINVAL;

	/* Program the 0-based vector number into the MSI transmit register. */
	val = rcar_pci_read_reg(pcie, PCIEMSITXR);
	rcar_pci_write_reg(pcie, val | (interrupt_num - 1), PCIEMSITXR);

	return 0;
}
405*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_raise_irq() - dispatch an interrupt request from the EPF
 * core to the matching assert helper.
 *
 * Return: 0 on success, -EINVAL for unsupported IRQ types or helper errors.
 */
static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	if (type == PCI_EPC_IRQ_LEGACY)
		return rcar_pcie_ep_assert_intx(ep, fn, 0);

	if (type == PCI_EPC_IRQ_MSI)
		return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);

	return -EINVAL;
}
423*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_start() - start the controller: restore MACCTLR to its
 * initial value, then kick off configuration with CFINIT.
 */
static int rcar_pcie_ep_start(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	/* NOTE(review): MACCTLR appears to need re-init before CFINIT — confirm with HW manual. */
	rcar_pci_write_reg(&ep->pcie, MACCTLR_INIT_VAL, MACCTLR);
	rcar_pci_write_reg(&ep->pcie, CFINIT, PCIETCTLR);

	return 0;
}
433*4882a593Smuzhiyun
/* Stop the controller by clearing the transfer control register. */
static void rcar_pcie_ep_stop(struct pci_epc *epc)
{
	struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);

	rcar_pci_write_reg(&ep->pcie, 0, PCIETCTLR);
}
440*4882a593Smuzhiyun
/* Feature set reported to the EPF core for every function. */
static const struct pci_epc_features rcar_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
	.reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
	.bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
	.bar_fixed_size[0] = 128,	/* bytes */
	.bar_fixed_size[2] = 256,	/* bytes */
	.bar_fixed_size[4] = 256,	/* bytes */
};
452*4882a593Smuzhiyun
/* All functions share the same static feature table above. */
static const struct pci_epc_features*
rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
{
	return &rcar_pcie_epc_features;
}
458*4882a593Smuzhiyun
/* EPC callbacks wired into the PCI endpoint framework. */
static const struct pci_epc_ops rcar_pcie_epc_ops = {
	.write_header	= rcar_pcie_ep_write_header,
	.set_bar	= rcar_pcie_ep_set_bar,
	.clear_bar	= rcar_pcie_ep_clear_bar,
	.set_msi	= rcar_pcie_ep_set_msi,
	.get_msi	= rcar_pcie_ep_get_msi,
	.map_addr	= rcar_pcie_ep_map_addr,
	.unmap_addr	= rcar_pcie_ep_unmap_addr,
	.raise_irq	= rcar_pcie_ep_raise_irq,
	.start		= rcar_pcie_ep_start,
	.stop		= rcar_pcie_ep_stop,
	.get_features	= rcar_pcie_ep_get_features,
};
472*4882a593Smuzhiyun
/* Devicetree match table: SoC-specific entry first, generic fallback last. */
static const struct of_device_id rcar_pcie_ep_of_match[] = {
	{ .compatible = "renesas,r8a774c0-pcie-ep", },
	{ .compatible = "renesas,rcar-gen3-pcie-ep" },
	{ },
};
478*4882a593Smuzhiyun
/*
 * rcar_pcie_ep_probe() - allocate driver state, power up the device,
 * gather DT resources, initialize the hardware, and register the EPC.
 *
 * Uses goto-based unwinding: err_pm_put undoes the runtime-PM reference,
 * err_pm_disable additionally re-disables runtime PM.  All memory is
 * devm-managed, so no explicit frees are needed on failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int rcar_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_endpoint *ep;
	struct rcar_pcie *pcie;
	struct pci_epc *epc;
	int err;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pcie = &ep->pcie;
	pcie->dev = dev;

	/* Power the controller before touching any registers. */
	pm_runtime_enable(dev);
	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	err = rcar_pcie_ep_get_pdata(ep, pdev);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	/* Inbound windows are managed through a bitmap of fixed capacity. */
	ep->num_ib_windows = MAX_NR_INBOUND_MAPS;
	ep->ib_window_map =
		devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows),
			     sizeof(long), GFP_KERNEL);
	if (!ep->ib_window_map) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for inbound map\n");
		goto err_pm_put;
	}

	/* One slot per outbound window to track the currently mapped address. */
	ep->ob_mapped_addr = devm_kcalloc(dev, ep->num_ob_windows,
					  sizeof(*ep->ob_mapped_addr),
					  GFP_KERNEL);
	if (!ep->ob_mapped_addr) {
		err = -ENOMEM;
		dev_err(dev, "failed to allocate memory for outbound memory pointers\n");
		goto err_pm_put;
	}

	epc = devm_pci_epc_create(dev, &rcar_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		err = PTR_ERR(epc);
		goto err_pm_put;
	}

	epc->max_functions = ep->max_functions;
	epc_set_drvdata(epc, ep);

	/* Hardware must be in endpoint mode before the EPC memory is set up. */
	rcar_pcie_ep_hw_init(pcie);

	err = pci_epc_multi_mem_init(epc, ep->ob_window, ep->num_ob_windows);
	if (err < 0) {
		dev_err(dev, "failed to initialize the epc memory space\n");
		goto err_pm_put;
	}

	return 0;

err_pm_put:
	pm_runtime_put(dev);

err_pm_disable:
	pm_runtime_disable(dev);

	return err;
}
554*4882a593Smuzhiyun
/* Built-in only: no remove callback, and binding via sysfs is suppressed. */
static struct platform_driver rcar_pcie_ep_driver = {
	.driver = {
		.name = "rcar-pcie-ep",
		.of_match_table = rcar_pcie_ep_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_ep_probe,
};
builtin_platform_driver(rcar_pcie_ep_driver);
564