// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright Altera Corporation (C) 2013-2015. All rights reserved
 *
 * Author: Ley Foon Tan <lftan@altera.com>
 * Description: Altera PCIe host controller driver
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../pci.h"

#define RP_TX_REG0			0x2000
#define RP_TX_REG1			0x2004
#define RP_TX_CNTRL			0x2008
#define RP_TX_EOP			0x2
#define RP_TX_SOP			0x1
#define RP_RXCPL_STATUS			0x2010
#define RP_RXCPL_EOP			0x2
#define RP_RXCPL_SOP			0x1
#define RP_RXCPL_REG0			0x2014
#define RP_RXCPL_REG1			0x2018
#define P2A_INT_STATUS			0x3060
#define P2A_INT_STS_ALL			0xf
#define P2A_INT_ENABLE			0x3070
#define P2A_INT_ENA_ALL			0xf
#define RP_LTSSM			0x3c64
#define RP_LTSSM_MASK			0x1f
#define LTSSM_L0			0xf

#define S10_RP_TX_CNTRL			0x2004
#define S10_RP_RXCPL_REG		0x2008
#define S10_RP_RXCPL_STATUS		0x200C
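/*
 * Note: on Stratix 10 the root port's own configuration registers appear
 * at a 1 MiB offset within the Hip region (see S10_RP_CFG_ADDR below) and
 * are read and written directly through s10_rp_read_cfg()/
 * s10_rp_write_cfg() rather than through TLPs.
 */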
#define S10_RP_CFG_ADDR(pcie, reg)	\
	(((pcie)->hip_base) + (reg) + (1 << 20))
#define S10_RP_SECONDARY(pcie)		\
	readb(S10_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))

/* TLP configuration type 0 and 1 */
#define TLP_FMTTYPE_CFGRD0		0x04	/* Configuration Read Type 0 */
#define TLP_FMTTYPE_CFGWR0		0x44	/* Configuration Write Type 0 */
#define TLP_FMTTYPE_CFGRD1		0x05	/* Configuration Read Type 1 */
#define TLP_FMTTYPE_CFGWR1		0x45	/* Configuration Write Type 1 */
#define TLP_PAYLOAD_SIZE		0x01
#define TLP_READ_TAG			0x1d
#define TLP_WRITE_TAG			0x10
#define RP_DEVFN			0
#define TLP_REQ_ID(bus, devfn)		(((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, cfg)		\
		(((cfg) << 24) |	\
		  TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be)	\
		(((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset)	\
		(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_COMP_STATUS(s)		(((s) >> 13) & 7)
#define TLP_BYTE_COUNT(s)		(((s) >> 0) & 0xfff)
#define TLP_HDR_SIZE			3
#define TLP_LOOP			500

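/*
 * Illustrative sketch (not executed anywhere): a dword configuration read
 * of register 0x10 on bus 1, devfn 0 is issued as a three-dword TLP header
 * built from the macros above,
 *
 *	headers[0] = TLP_CFG_DW0(pcie, cfg);	fmt/type (CfgRd0 or CfgRd1), length 1
 *	headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, 0xf);	requester ID, tag, byte enables
 *	headers[2] = TLP_CFG_DW2(1, 0, 0x10);	target bus, devfn and register offset
 *
 * which is pushed through the TX port and completed through the RX
 * completion registers; see get_tlp_header(), tlp_cfg_dword_read() and the
 * tlp_*_packet() helpers below. Which CfgRd/CfgWr encoding is used depends
 * on the target bus and the hard-IP generation.
 */
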
#define LINK_UP_TIMEOUT			HZ
#define LINK_RETRAIN_TIMEOUT		HZ

#define DWORD_MASK			3

#define S10_TLP_FMTTYPE_CFGRD0		0x05
#define S10_TLP_FMTTYPE_CFGRD1		0x04
#define S10_TLP_FMTTYPE_CFGWR0		0x45
#define S10_TLP_FMTTYPE_CFGWR1		0x44

enum altera_pcie_version {
	ALTERA_PCIE_V1 = 0,
	ALTERA_PCIE_V2,
};

struct altera_pcie {
	struct platform_device	*pdev;
	void __iomem		*cra_base;
	void __iomem		*hip_base;
	int			irq;
	u8			root_bus_nr;
	struct irq_domain	*irq_domain;
	struct resource		bus_range;
	const struct altera_pcie_data	*pcie_data;
};

struct altera_pcie_ops {
	int (*tlp_read_pkt)(struct altera_pcie *pcie, u32 *value);
	void (*tlp_write_pkt)(struct altera_pcie *pcie, u32 *headers,
			      u32 data, bool align);
	bool (*get_link_status)(struct altera_pcie *pcie);
	int (*rp_read_cfg)(struct altera_pcie *pcie, int where,
			   int size, u32 *value);
	int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
			    int where, int size, u32 value);
};

struct altera_pcie_data {
	const struct altera_pcie_ops *ops;
	enum altera_pcie_version version;
	u32 cap_offset;		/* PCIe capability structure register offset */
	u32 cfgrd0;
	u32 cfgrd1;
	u32 cfgwr0;
	u32 cfgwr1;
};

struct tlp_rp_regpair_t {
	u32 ctrl;
	u32 reg0;
	u32 reg1;
};

static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
			      const u32 reg)
{
	writel_relaxed(value, pcie->cra_base + reg);
}

static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
{
	return readl_relaxed(pcie->cra_base + reg);
}

static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
	return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
}

static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
{
	void __iomem *addr = S10_RP_CFG_ADDR(pcie,
				   pcie->pcie_data->cap_offset +
				   PCI_EXP_LNKSTA);

	return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
}

/*
 * The Altera PCIe port uses BAR0 of the Root Complex's configuration space
 * as the translation from the PCI bus to the native bus. The entire DDR
 * region is mapped into PCIe space using these registers, so it can be
 * reached by DMA from EP devices. BAR0 is also used to reach the MSI vector
 * when an MSI/MSI-X interrupt is received from an EP device, which
 * eventually triggers an interrupt to the GIC. The bridge's BAR0 should
 * therefore be hidden during enumeration to avoid sizing and resource
 * allocation by the PCI core.
 */
static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
				    int offset)
{
	if (pci_is_root_bus(bus) && (devfn == 0) &&
	    (offset == PCI_BASE_ADDRESS_0))
		return true;

	return false;
}

static void tlp_write_tx(struct altera_pcie *pcie,
			 struct tlp_rp_regpair_t *tlp_rp_regdata)
{
	cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0);
	cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1);
	cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}

static void s10_tlp_write_tx(struct altera_pcie *pcie, u32 reg0, u32 ctrl)
{
	cra_writel(pcie, reg0, RP_TX_REG0);
	cra_writel(pcie, ctrl, S10_RP_TX_CNTRL);
}

static bool altera_pcie_valid_device(struct altera_pcie *pcie,
				     struct pci_bus *bus, int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pcie->root_bus_nr) {
		if (!pcie->pcie_data->ops->get_link_status(pcie))
			return false;
	}

	/* access only one slot on each root port */
	if (bus->number == pcie->root_bus_nr && dev > 0)
		return false;

	return true;
}

static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
{
	int i;
	bool sop = false;
	u32 ctrl;
	u32 reg0, reg1;
	u32 comp_status = 1;

	/*
	 * Minimum 2 loops to read TLP headers and 1 loop to read data
	 * payload.
	 */
	for (i = 0; i < TLP_LOOP; i++) {
		ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
		if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
			reg0 = cra_readl(pcie, RP_RXCPL_REG0);
			reg1 = cra_readl(pcie, RP_RXCPL_REG1);

			if (ctrl & RP_RXCPL_SOP) {
				sop = true;
				comp_status = TLP_COMP_STATUS(reg1);
			}

			if (ctrl & RP_RXCPL_EOP) {
				if (comp_status)
					return PCIBIOS_DEVICE_NOT_FOUND;

				if (value)
					*value = reg0;

				return PCIBIOS_SUCCESSFUL;
			}
		}
		udelay(5);
	}

	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int s10_tlp_read_packet(struct altera_pcie *pcie, u32 *value)
{
	u32 ctrl;
	u32 comp_status;
	u32 dw[4];
	u32 count;
	struct device *dev = &pcie->pdev->dev;

	for (count = 0; count < TLP_LOOP; count++) {
		ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
		if (ctrl & RP_RXCPL_SOP) {
			/* Read first DW */
			dw[0] = cra_readl(pcie, S10_RP_RXCPL_REG);
			break;
		}

		udelay(5);
	}

	/* SOP detection failed, return error */
	if (count == TLP_LOOP)
		return PCIBIOS_DEVICE_NOT_FOUND;

	count = 1;

	/* Poll for EOP */
	while (count < ARRAY_SIZE(dw)) {
		ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS);
		dw[count++] = cra_readl(pcie, S10_RP_RXCPL_REG);
		if (ctrl & RP_RXCPL_EOP) {
			comp_status = TLP_COMP_STATUS(dw[1]);
			if (comp_status)
				return PCIBIOS_DEVICE_NOT_FOUND;

			if (value && TLP_BYTE_COUNT(dw[1]) == sizeof(u32) &&
			    count == 4)
				*value = dw[3];

			return PCIBIOS_SUCCESSFUL;
		}
	}

	dev_warn(dev, "Malformed TLP packet\n");

	return PCIBIOS_DEVICE_NOT_FOUND;
}

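/*
 * Push a TLP through the 64-bit TX interface. When the target register is
 * qword-aligned (align == true), an extra cycle carrying only headers[2]
 * is sent so that the data dword starts a new cycle on the lower lane;
 * otherwise the data dword is sent alongside headers[2] in the same cycle.
 */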
static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
			     u32 data, bool align)
{
	struct tlp_rp_regpair_t tlp_rp_regdata;

	tlp_rp_regdata.reg0 = headers[0];
	tlp_rp_regdata.reg1 = headers[1];
	tlp_rp_regdata.ctrl = RP_TX_SOP;
	tlp_write_tx(pcie, &tlp_rp_regdata);

	if (align) {
		tlp_rp_regdata.reg0 = headers[2];
		tlp_rp_regdata.reg1 = 0;
		tlp_rp_regdata.ctrl = 0;
		tlp_write_tx(pcie, &tlp_rp_regdata);

		tlp_rp_regdata.reg0 = data;
		tlp_rp_regdata.reg1 = 0;
	} else {
		tlp_rp_regdata.reg0 = headers[2];
		tlp_rp_regdata.reg1 = data;
	}

	tlp_rp_regdata.ctrl = RP_TX_EOP;
	tlp_write_tx(pcie, &tlp_rp_regdata);
}

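/*
 * The Stratix 10 TX interface takes a single dword per write, so the three
 * header dwords and the data dword are pushed individually, with SOP
 * flagged on the first write and EOP on the last. The alignment flag used
 * by the older interface is not needed here, hence the unused "dummy"
 * parameter.
 */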
static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
				 u32 data, bool dummy)
{
	s10_tlp_write_tx(pcie, headers[0], RP_TX_SOP);
	s10_tlp_write_tx(pcie, headers[1], 0);
	s10_tlp_write_tx(pcie, headers[2], 0);
	s10_tlp_write_tx(pcie, data, RP_TX_EOP);
}

static void get_tlp_header(struct altera_pcie *pcie, u8 bus, u32 devfn,
			   int where, u8 byte_en, bool read, u32 *headers)
{
	u8 cfg;
	u8 cfg0 = read ? pcie->pcie_data->cfgrd0 : pcie->pcie_data->cfgwr0;
	u8 cfg1 = read ? pcie->pcie_data->cfgrd1 : pcie->pcie_data->cfgwr1;
	u8 tag = read ? TLP_READ_TAG : TLP_WRITE_TAG;

	if (pcie->pcie_data->version == ALTERA_PCIE_V1)
		cfg = (bus == pcie->root_bus_nr) ? cfg0 : cfg1;
	else
		cfg = (bus > S10_RP_SECONDARY(pcie)) ? cfg0 : cfg1;

	headers[0] = TLP_CFG_DW0(pcie, cfg);
	headers[1] = TLP_CFG_DW1(pcie, tag, byte_en);
	headers[2] = TLP_CFG_DW2(bus, devfn, where);
}

static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
			      int where, u8 byte_en, u32 *value)
{
	u32 headers[TLP_HDR_SIZE];

	get_tlp_header(pcie, bus, devfn, where, byte_en, true,
		       headers);

	pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false);

	return pcie->pcie_data->ops->tlp_read_pkt(pcie, value);
}

static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
			       int where, u8 byte_en, u32 value)
{
	u32 headers[TLP_HDR_SIZE];
	int ret;

	get_tlp_header(pcie, bus, devfn, where, byte_en, false,
		       headers);

	/* check alignment to Qword */
	if ((where & 0x7) == 0)
		pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
						    value, true);
	else
		pcie->pcie_data->ops->tlp_write_pkt(pcie, headers,
						    value, false);

	ret = pcie->pcie_data->ops->tlp_read_pkt(pcie, NULL);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	/*
	 * Monitor changes to PCI_PRIMARY_BUS register on root port
	 * and update local copy of root bus number accordingly.
	 */
	if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS))
		pcie->root_bus_nr = (u8)(value);

	return PCIBIOS_SUCCESSFUL;
}

static int s10_rp_read_cfg(struct altera_pcie *pcie, int where,
			   int size, u32 *value)
{
	void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);

	switch (size) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	default:
		*value = readl(addr);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
			    int where, int size, u32 value)
{
	void __iomem *addr = S10_RP_CFG_ADDR(pcie, where);

	switch (size) {
	case 1:
		writeb(value, addr);
		break;
	case 2:
		writew(value, addr);
		break;
	default:
		writel(value, addr);
		break;
	}

	/*
	 * Monitor changes to PCI_PRIMARY_BUS register on root port
	 * and update local copy of root bus number accordingly.
	 */
	if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
		pcie->root_bus_nr = value & 0xff;

	return PCIBIOS_SUCCESSFUL;
}

static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
				 unsigned int devfn, int where, int size,
				 u32 *value)
{
	int ret;
	u32 data;
	u8 byte_en;

	if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_read_cfg)
		return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
							 size, value);

	switch (size) {
	case 1:
		byte_en = 1 << (where & 3);
		break;
	case 2:
		byte_en = 3 << (where & 3);
		break;
	default:
		byte_en = 0xf;
		break;
	}

	ret = tlp_cfg_dword_read(pcie, busno, devfn,
				 (where & ~DWORD_MASK), byte_en, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	switch (size) {
	case 1:
		*value = (data >> (8 * (where & 0x3))) & 0xff;
		break;
	case 2:
		*value = (data >> (8 * (where & 0x2))) & 0xffff;
		break;
	default:
		*value = data;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
				  unsigned int devfn, int where, int size,
				  u32 value)
{
	u32 data32;
	u32 shift = 8 * (where & 3);
	u8 byte_en;

	if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_write_cfg)
		return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
							  where, size, value);

	switch (size) {
	case 1:
		data32 = (value & 0xff) << shift;
		byte_en = 1 << (where & 3);
		break;
	case 2:
		data32 = (value & 0xffff) << shift;
		byte_en = 3 << (where & 3);
		break;
	default:
		data32 = value;
		byte_en = 0xf;
		break;
	}

	return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
				   byte_en, data32);
}

static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *value)
{
	struct altera_pcie *pcie = bus->sysdata;

	if (altera_pcie_hide_rc_bar(bus, devfn, where))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
		*value = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
				     value);
}

static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 value)
{
	struct altera_pcie *pcie = bus->sysdata;

	if (altera_pcie_hide_rc_bar(bus, devfn, where))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
				      value);
}

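/*
 * Config accessors hooked into the PCI core; for example, a
 * pci_bus_read_config_dword() issued during enumeration ends up in
 * altera_pcie_cfg_read() above.
 */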
static struct pci_ops altera_pcie_ops = {
	.read = altera_pcie_cfg_read,
	.write = altera_pcie_cfg_write,
};

static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
				unsigned int devfn, int offset, u16 *value)
{
	u32 data;
	int ret;

	ret = _altera_pcie_cfg_read(pcie, busno, devfn,
				    pcie->pcie_data->cap_offset + offset,
				    sizeof(*value),
				    &data);
	*value = data;
	return ret;
}

static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
				 unsigned int devfn, int offset, u16 value)
{
	return _altera_pcie_cfg_write(pcie, busno, devfn,
				      pcie->pcie_data->cap_offset + offset,
				      sizeof(value),
				      value);
}

static void altera_wait_link_retrain(struct altera_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	u16 reg16;
	unsigned long start_jiffies;

	/* Wait for link training to end. */
	start_jiffies = jiffies;
	for (;;) {
		altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
				     PCI_EXP_LNKSTA, &reg16);
		if (!(reg16 & PCI_EXP_LNKSTA_LT))
			break;

		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
			dev_err(dev, "link retrain timeout\n");
			break;
		}
		udelay(100);
	}

	/* Wait for the link to come up. */
	start_jiffies = jiffies;
	for (;;) {
		if (pcie->pcie_data->ops->get_link_status(pcie))
			break;

		if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
			dev_err(dev, "link up timeout\n");
			break;
		}
		udelay(100);
	}
}

static void altera_pcie_retrain(struct altera_pcie *pcie)
{
	u16 linkcap, linkstat, linkctl;

	if (!pcie->pcie_data->ops->get_link_status(pcie))
		return;

	/*
	 * Set the Retrain Link bit if the PCIe root port supports > 2.5 GT/s
	 * but the current link speed is 2.5 GT/s.
	 */
	altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
			     &linkcap);
	if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
		return;

	altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
			     &linkstat);
	if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
		altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
				     PCI_EXP_LNKCTL, &linkctl);
		linkctl |= PCI_EXP_LNKCTL_RL;
		altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
				      PCI_EXP_LNKCTL, linkctl);

		altera_wait_link_retrain(pcie);
	}
}

static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = altera_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};

static void altera_pcie_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct altera_pcie *pcie;
	struct device *dev;
	unsigned long status;
	u32 bit;
	u32 virq;

	chained_irq_enter(chip, desc);
	pcie = irq_desc_get_handler_data(desc);
	dev = &pcie->pdev->dev;

	while ((status = cra_readl(pcie, P2A_INT_STATUS)
		& P2A_INT_STS_ALL) != 0) {
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			/* clear interrupts */
			cra_writel(pcie, 1 << bit, P2A_INT_STATUS);

			virq = irq_find_mapping(pcie->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
			else
				dev_err(dev, "unexpected IRQ, INT%d\n", bit);
		}
	}

	chained_irq_exit(chip, desc);
}

static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;

	/* Setup INTx */
	pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
						 &intx_domain_ops, pcie);
	if (!pcie->irq_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENOMEM;
	}

	return 0;
}

static void altera_pcie_irq_teardown(struct altera_pcie *pcie)
{
	irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
	irq_domain_remove(pcie->irq_domain);
	irq_dispose_mapping(pcie->irq);
}

static int altera_pcie_parse_dt(struct altera_pcie *pcie)
{
	struct platform_device *pdev = pcie->pdev;

	pcie->cra_base = devm_platform_ioremap_resource_byname(pdev, "Cra");
	if (IS_ERR(pcie->cra_base))
		return PTR_ERR(pcie->cra_base);

	if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
		pcie->hip_base =
			devm_platform_ioremap_resource_byname(pdev, "Hip");
		if (IS_ERR(pcie->hip_base))
			return PTR_ERR(pcie->hip_base);
	}

	/* setup IRQ */
	pcie->irq = platform_get_irq(pdev, 0);
	if (pcie->irq < 0)
		return pcie->irq;

	irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
	return 0;
}

static void altera_pcie_host_init(struct altera_pcie *pcie)
{
	altera_pcie_retrain(pcie);
}

static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
	.tlp_read_pkt = tlp_read_packet,
	.tlp_write_pkt = tlp_write_packet,
	.get_link_status = altera_pcie_link_up,
};

static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
	.tlp_read_pkt = s10_tlp_read_packet,
	.tlp_write_pkt = s10_tlp_write_packet,
	.get_link_status = s10_altera_pcie_link_up,
	.rp_read_cfg = s10_rp_read_cfg,
	.rp_write_cfg = s10_rp_write_cfg,
};

static const struct altera_pcie_data altera_pcie_1_0_data = {
	.ops = &altera_pcie_ops_1_0,
	.cap_offset = 0x80,
	.version = ALTERA_PCIE_V1,
	.cfgrd0 = TLP_FMTTYPE_CFGRD0,
	.cfgrd1 = TLP_FMTTYPE_CFGRD1,
	.cfgwr0 = TLP_FMTTYPE_CFGWR0,
	.cfgwr1 = TLP_FMTTYPE_CFGWR1,
};

static const struct altera_pcie_data altera_pcie_2_0_data = {
	.ops = &altera_pcie_ops_2_0,
	.version = ALTERA_PCIE_V2,
	.cap_offset = 0x70,
	.cfgrd0 = S10_TLP_FMTTYPE_CFGRD0,
	.cfgrd1 = S10_TLP_FMTTYPE_CFGRD1,
	.cfgwr0 = S10_TLP_FMTTYPE_CFGWR0,
	.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
};

static const struct of_device_id altera_pcie_of_match[] = {
	{.compatible = "altr,pcie-root-port-1.0",
	 .data = &altera_pcie_1_0_data },
	{.compatible = "altr,pcie-root-port-2.0",
	 .data = &altera_pcie_2_0_data },
	{},
};

static int altera_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct altera_pcie *pcie;
	struct pci_host_bridge *bridge;
	int ret;
	const struct of_device_id *match;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	match = of_match_device(altera_pcie_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	pcie->pcie_data = match->data;

	ret = altera_pcie_parse_dt(pcie);
	if (ret) {
		dev_err(dev, "Parsing DT failed\n");
		return ret;
	}

	ret = altera_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed creating IRQ Domain\n");
		return ret;
	}

	/* clear all interrupts */
	cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
	/* enable all interrupts */
	cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
	altera_pcie_host_init(pcie);

	bridge->sysdata = pcie;
	bridge->busnr = pcie->root_bus_nr;
	bridge->ops = &altera_pcie_ops;

	return pci_host_probe(bridge);
}

static int altera_pcie_remove(struct platform_device *pdev)
{
	struct altera_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	altera_pcie_irq_teardown(pcie);

	return 0;
}

static struct platform_driver altera_pcie_driver = {
	.probe		= altera_pcie_probe,
	.remove		= altera_pcie_remove,
	.driver		= {
		.name	= "altera-pcie",
		.of_match_table = altera_pcie_of_match,
	},
};

MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
module_platform_driver(altera_pcie_driver);
MODULE_LICENSE("GPL v2");