// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for NWL PCIe Bridge
 * Based on pcie-xilinx.c, pci-tegra.c
 *
 * (C) Copyright 2014 - 2015, Xilinx, Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>

#include "../pci.h"

/* Bridge core config registers */
#define BRCFG_PCIE_RX0			0x00000000
#define BRCFG_INTERRUPT			0x00000010
#define BRCFG_PCIE_RX_MSG_FILTER	0x00000020

/* Egress - Bridge translation registers */
#define E_BREG_CAPABILITIES		0x00000200
#define E_BREG_CONTROL			0x00000208
#define E_BREG_BASE_LO			0x00000210
#define E_BREG_BASE_HI			0x00000214
#define E_ECAM_CAPABILITIES		0x00000220
#define E_ECAM_CONTROL			0x00000228
#define E_ECAM_BASE_LO			0x00000230
#define E_ECAM_BASE_HI			0x00000234

/* Ingress - address translations */
#define I_MSII_CAPABILITIES		0x00000300
#define I_MSII_CONTROL			0x00000308
#define I_MSII_BASE_LO			0x00000310
#define I_MSII_BASE_HI			0x00000314

#define I_ISUB_CONTROL			0x000003E8
#define SET_ISUB_CONTROL		BIT(0)
/* Rxed msg fifo - Interrupt status registers */
#define MSGF_MISC_STATUS		0x00000400
#define MSGF_MISC_MASK			0x00000404
#define MSGF_LEG_STATUS			0x00000420
#define MSGF_LEG_MASK			0x00000424
#define MSGF_MSI_STATUS_LO		0x00000440
#define MSGF_MSI_STATUS_HI		0x00000444
#define MSGF_MSI_MASK_LO		0x00000448
#define MSGF_MSI_MASK_HI		0x0000044C

/* Msg filter mask bits */
#define CFG_ENABLE_PM_MSG_FWD		BIT(1)
#define CFG_ENABLE_INT_MSG_FWD		BIT(2)
#define CFG_ENABLE_ERR_MSG_FWD		BIT(3)
#define CFG_ENABLE_MSG_FILTER_MASK	(CFG_ENABLE_PM_MSG_FWD | \
					CFG_ENABLE_INT_MSG_FWD | \
					CFG_ENABLE_ERR_MSG_FWD)

/* Misc interrupt status mask bits */
#define MSGF_MISC_SR_RXMSG_AVAIL	BIT(0)
#define MSGF_MISC_SR_RXMSG_OVER		BIT(1)
#define MSGF_MISC_SR_SLAVE_ERR		BIT(4)
#define MSGF_MISC_SR_MASTER_ERR		BIT(5)
#define MSGF_MISC_SR_I_ADDR_ERR		BIT(6)
#define MSGF_MISC_SR_E_ADDR_ERR		BIT(7)
#define MSGF_MISC_SR_FATAL_AER		BIT(16)
#define MSGF_MISC_SR_NON_FATAL_AER	BIT(17)
#define MSGF_MISC_SR_CORR_AER		BIT(18)
#define MSGF_MISC_SR_UR_DETECT		BIT(20)
#define MSGF_MISC_SR_NON_FATAL_DEV	BIT(22)
#define MSGF_MISC_SR_FATAL_DEV		BIT(23)
#define MSGF_MISC_SR_LINK_DOWN		BIT(24)
#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH	BIT(25)
#define MSGF_MSIC_SR_LINK_BWIDTH	BIT(26)

#define MSGF_MISC_SR_MASKALL		(MSGF_MISC_SR_RXMSG_AVAIL | \
					MSGF_MISC_SR_RXMSG_OVER | \
					MSGF_MISC_SR_SLAVE_ERR | \
					MSGF_MISC_SR_MASTER_ERR | \
					MSGF_MISC_SR_I_ADDR_ERR | \
					MSGF_MISC_SR_E_ADDR_ERR | \
					MSGF_MISC_SR_FATAL_AER | \
					MSGF_MISC_SR_NON_FATAL_AER | \
					MSGF_MISC_SR_CORR_AER | \
					MSGF_MISC_SR_UR_DETECT | \
					MSGF_MISC_SR_NON_FATAL_DEV | \
					MSGF_MISC_SR_FATAL_DEV | \
					MSGF_MISC_SR_LINK_DOWN | \
					MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
					MSGF_MSIC_SR_LINK_BWIDTH)

/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA		BIT(0)
#define MSGF_LEG_SR_INTB		BIT(1)
#define MSGF_LEG_SR_INTC		BIT(2)
#define MSGF_LEG_SR_INTD		BIT(3)
#define MSGF_LEG_SR_MASKALL		(MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
					MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)

/* MSI interrupt status mask bits */
#define MSGF_MSI_SR_LO_MASK		GENMASK(31, 0)
#define MSGF_MSI_SR_HI_MASK		GENMASK(31, 0)

#define MSII_PRESENT			BIT(0)
#define MSII_ENABLE			BIT(0)
#define MSII_STATUS_ENABLE		BIT(15)

/* Bridge config interrupt mask */
#define BRCFG_INTERRUPT_MASK		BIT(0)
#define BREG_PRESENT			BIT(0)
#define BREG_ENABLE			BIT(0)
#define BREG_ENABLE_FORCE		BIT(1)

/* E_ECAM status mask bits */
#define E_ECAM_PRESENT			BIT(0)
#define E_ECAM_CR_ENABLE		BIT(0)
#define E_ECAM_SIZE_LOC			GENMASK(20, 16)
#define E_ECAM_SIZE_SHIFT		16
#define ECAM_BUS_LOC_SHIFT		20
#define ECAM_DEV_LOC_SHIFT		12
#define NWL_ECAM_VALUE_DEFAULT		12

#define CFG_DMA_REG_BAR			GENMASK(2, 0)

#define INT_PCI_MSI_NR			(2 * 32)
/* Reading the PS_LINKUP */
#define PS_LINKUP_OFFSET		0x00000238
#define PCIE_PHY_LINKUP_BIT		BIT(0)
#define PHY_RDY_LINKUP_BIT		BIT(1)

/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

struct nwl_msi {			/* MSI information */
	struct irq_domain *msi_domain;
	unsigned long *bitmap;
	struct irq_domain *dev_domain;
	struct mutex lock;		/* protect bitmap variable */
	int irq_msi0;
	int irq_msi1;
};

struct nwl_pcie {
	struct device *dev;
	void __iomem *breg_base;
	void __iomem *pcireg_base;
	void __iomem *ecam_base;
	phys_addr_t phys_breg_base;	/* Physical Bridge Register Base */
	phys_addr_t phys_pcie_reg_base;	/* Physical PCIe Controller Base */
	phys_addr_t phys_ecam_base;	/* Physical Configuration Base */
	u32 breg_size;
	u32 pcie_reg_size;
	u32 ecam_size;
	int irq_intx;
	int irq_misc;
	u32 ecam_value;
	u8 last_busno;
	struct nwl_msi msi;
	struct irq_domain *legacy_irq_domain;
	struct clk *clk;
	raw_spinlock_t leg_mask_lock;
};

static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
{
	return readl(pcie->breg_base + off);
}

static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
{
	writel(val, pcie->breg_base + off);
}

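/* Report whether the PCIe link-up bit is set in the PS_LINKUP register */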
static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
{
	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT)
		return true;
	return false;
}

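/* Report whether the PHY-ready bit is set in the PS_LINKUP register */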
static bool nwl_phy_link_up(struct nwl_pcie *pcie)
{
	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT)
		return true;
	return false;
}

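/* Poll for PHY readiness, sleeping between retries; -ETIMEDOUT on failure */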
static int nwl_wait_for_link(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (nwl_phy_link_up(pcie))
			return 0;
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(dev, "PHY link never came up\n");
	return -ETIMEDOUT;
}

static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
	struct nwl_pcie *pcie = bus->sysdata;

	/* Check link before accessing downstream ports */
	if (!pci_is_root_bus(bus)) {
		if (!nwl_pcie_link_up(pcie))
			return false;
	} else if (devfn > 0)
		/* Only one device down on each root port */
		return false;

	return true;
}

/**
 * nwl_pcie_map_bus - Get configuration base
 *
 * @bus: Bus structure of current bus
 * @devfn: Device/function
 * @where: Offset from base
 *
 * Return: Base address of the configuration space needed to be
 *	   accessed.
 */
static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct nwl_pcie *pcie = bus->sysdata;
	int relbus;

	if (!nwl_pcie_valid_device(bus, devfn))
		return NULL;

	relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
		 (devfn << ECAM_DEV_LOC_SHIFT);

	return pcie->ecam_base + relbus + where;
}

/* PCIe operations */
static struct pci_ops nwl_pcie_ops = {
	.map_bus = nwl_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

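/*
 * Handler for the "misc" interrupt: decode and log the pending error and
 * link-management events, then clear the handled status bits.
 */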
static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
	struct nwl_pcie *pcie = data;
	struct device *dev = pcie->dev;
	u32 misc_stat;

	/* Checking for misc interrupts */
	misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
		    MSGF_MISC_SR_MASKALL;
	if (!misc_stat)
		return IRQ_NONE;

	if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
		dev_err(dev, "Received Message FIFO Overflow\n");

	if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
		dev_err(dev, "Slave error\n");

	if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
		dev_err(dev, "Master error\n");

	if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
		dev_err(dev, "In Misc Ingress address translation error\n");

	if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
		dev_err(dev, "In Misc Egress address translation error\n");

	if (misc_stat & MSGF_MISC_SR_FATAL_AER)
		dev_err(dev, "Fatal Error in AER Capability\n");

	if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
		dev_err(dev, "Non-Fatal Error in AER Capability\n");

	if (misc_stat & MSGF_MISC_SR_CORR_AER)
		dev_err(dev, "Correctable Error in AER Capability\n");

	if (misc_stat & MSGF_MISC_SR_UR_DETECT)
		dev_err(dev, "Unsupported request Detected\n");

	if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
		dev_err(dev, "Non-Fatal Error Detected\n");

	if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
		dev_err(dev, "Fatal Error Detected\n");

	if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
		dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");

	if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
		dev_info(dev, "Link Bandwidth Management Status bit set\n");

	/* Clear misc interrupt status */
	nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);

	return IRQ_HANDLED;
}

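/*
 * Chained handler for legacy INTx interrupts: demultiplex MSGF_LEG_STATUS
 * and dispatch each pending INTx to its mapping in the legacy IRQ domain.
 */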
static void nwl_pcie_leg_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie;
	unsigned long status;
	u32 bit;
	u32 virq;

	chained_irq_enter(chip, desc);
	pcie = irq_desc_get_handler_data(desc);

	while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
				MSGF_LEG_SR_MASKALL) != 0) {
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			virq = irq_find_mapping(pcie->legacy_irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	chained_irq_exit(chip, desc);
}

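/*
 * Service one MSI status register: acknowledge each pending vector and
 * forward it to the corresponding virtual IRQ in the MSI domain.
 */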
static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
{
	struct nwl_msi *msi;
	unsigned long status;
	u32 bit;
	u32 virq;

	msi = &pcie->msi;

	while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			nwl_bridge_writel(pcie, 1 << bit, status_reg);
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}
}

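/* Chained handler for the upper 32 MSI vectors (MSGF_MSI_STATUS_HI) */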
static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
	chained_irq_exit(chip, desc);
}

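/* Chained handler for the lower 32 MSI vectors (MSGF_MSI_STATUS_LO) */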
static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
	chained_irq_exit(chip, desc);
}

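/* Mask (disable) one INTx line in MSGF_LEG_MASK under leg_mask_lock */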
static void nwl_mask_leg_irq(struct irq_data *data)
{
	struct irq_desc *desc = irq_to_desc(data->irq);
	struct nwl_pcie *pcie;
	unsigned long flags;
	u32 mask;
	u32 val;

	pcie = irq_desc_get_chip_data(desc);
	mask = 1 << (data->hwirq - 1);
	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}

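/* Unmask (enable) one INTx line in MSGF_LEG_MASK under leg_mask_lock */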
static void nwl_unmask_leg_irq(struct irq_data *data)
{
	struct irq_desc *desc = irq_to_desc(data->irq);
	struct nwl_pcie *pcie;
	unsigned long flags;
	u32 mask;
	u32 val;

	pcie = irq_desc_get_chip_data(desc);
	mask = 1 << (data->hwirq - 1);
	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
	nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}

static struct irq_chip nwl_leg_irq_chip = {
	.name = "nwl_pcie:legacy",
	.irq_enable = nwl_unmask_leg_irq,
	.irq_disable = nwl_mask_leg_irq,
	.irq_mask = nwl_mask_leg_irq,
	.irq_unmask = nwl_unmask_leg_irq,
};

static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
			  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops legacy_domain_ops = {
	.map = nwl_legacy_map,
	.xlate = pci_irqd_intx_xlate,
};

#ifdef CONFIG_PCI_MSI
static struct irq_chip nwl_msi_irq_chip = {
	.name = "nwl_pcie:msi",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info nwl_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI),
	.chip = &nwl_msi_irq_chip,
};
#endif

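/*
 * Compose the MSI message: the address is the bridge's physical register
 * base (programmed as the MSII target below), the data is the hwirq number.
 */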
static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = pcie->phys_pcie_reg_base;

	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}

static int nwl_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip nwl_irq_chip = {
	.name = "Xilinx MSI",
	.irq_compose_msi_msg = nwl_compose_msi_msg,
	.irq_set_affinity = nwl_msi_set_affinity,
};

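/*
 * Allocate a contiguous block of MSI hwirqs from the bitmap and bind each
 * one to a virtual IRQ in the device MSI domain.
 */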
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	struct nwl_pcie *pcie = domain->host_data;
	struct nwl_msi *msi = &pcie->msi;
	int bit;
	int i;

	mutex_lock(&msi->lock);
	bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
				      get_count_order(nr_irqs));
	if (bit < 0) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}
	mutex_unlock(&msi->lock);
	return 0;
}

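/* Return a freed block of MSI hwirqs to the bitmap */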
static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct nwl_msi *msi = &pcie->msi;

	mutex_lock(&msi->lock);
	bitmap_release_region(msi->bitmap, data->hwirq,
			      get_count_order(nr_irqs));
	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
	.alloc = nwl_irq_domain_alloc,
	.free = nwl_irq_domain_free,
};

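/*
 * Create the MSI IRQ domains: a linear device domain plus the PCI MSI
 * domain layered on top; a no-op when CONFIG_PCI_MSI is disabled.
 */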
static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
	struct device *dev = pcie->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct nwl_msi *msi = &pcie->msi;

	msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
						&dev_msi_domain_ops, pcie);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create dev IRQ domain\n");
		return -ENOMEM;
	}
	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &nwl_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create msi IRQ domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}
#endif
	return 0;
}

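/*
 * Set up the legacy INTx IRQ domain from the child interrupt-controller
 * node, then initialize the MSI domains.
 */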
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node;
	struct device_node *legacy_intc_node;

	legacy_intc_node = of_get_next_child(node, NULL);
	if (!legacy_intc_node) {
		dev_err(dev, "No legacy intc node found\n");
		return -EINVAL;
	}

	pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
							PCI_NUM_INTX,
							&legacy_domain_ops,
							pcie);
	of_node_put(legacy_intc_node);
	if (!pcie->legacy_irq_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	raw_spin_lock_init(&pcie->leg_mask_lock);
	nwl_pcie_init_msi_irq_domain(pcie);
	return 0;
}

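/*
 * Allocate the MSI vector bitmap, install the chained handlers for the two
 * MSI interrupts, and program the bridge's MSII translation and MSI mask
 * registers.
 */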
static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct nwl_msi *msi = &pcie->msi;
	unsigned long base;
	int ret;
	int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);

	mutex_init(&msi->lock);

	msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!msi->bitmap)
		return -ENOMEM;

	/* Get msi_1 IRQ number */
	msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
	if (msi->irq_msi1 < 0) {
		ret = -EINVAL;
		goto err;
	}

	irq_set_chained_handler_and_data(msi->irq_msi1,
					 nwl_pcie_msi_handler_high, pcie);

	/* Get msi_0 IRQ number */
	msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
	if (msi->irq_msi0 < 0) {
		ret = -EINVAL;
		goto err;
	}

	irq_set_chained_handler_and_data(msi->irq_msi0,
					 nwl_pcie_msi_handler_low, pcie);

	/* Check for msii_present bit */
	ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
	if (!ret) {
		dev_err(dev, "MSI not present\n");
		ret = -EIO;
		goto err;
	}

	/* Enable MSII */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
			  MSII_ENABLE, I_MSII_CONTROL);

	/* Enable MSII status */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
			  MSII_STATUS_ENABLE, I_MSII_CONTROL);

	/* setup AFI/FPCI range */
	base = pcie->phys_pcie_reg_base;
	nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);

	/*
	 * For high range MSI interrupts: disable, clear any pending,
	 * and enable
	 */
	nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) &
			  MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);

	nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);

	/*
	 * For low range MSI interrupts: disable, clear any pending,
	 * and enable
	 */
	nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
			  MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);

	nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);

	return 0;
err:
	kfree(msi->bitmap);
	msi->bitmap = NULL;
	return ret;
}

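/*
 * Program the bridge: set up the BREG and ECAM apertures, wait for the
 * link, write the bus number range, and request/enable the misc and
 * legacy interrupts.
 */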
static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 breg_val, ecam_val, first_busno = 0;
	int err;

	breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
	if (!breg_val) {
		dev_err(dev, "BREG is not present\n");
		return breg_val;
	}

	/* Write bridge_off to breg base */
	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
			  E_BREG_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
			  E_BREG_BASE_HI);

	/* Enable BREG */
	nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
			  E_BREG_CONTROL);

	/* Disable DMA channel registers */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
			  CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);

	/* Enable Ingress subtractive decode translation */
	nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);

	/* Enable msg filtering details */
	nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
			  BRCFG_PCIE_RX_MSG_FILTER);

	err = nwl_wait_for_link(pcie);
	if (err)
		return err;

	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
	if (!ecam_val) {
		dev_err(dev, "ECAM is not present\n");
		return ecam_val;
	}

	/* Enable ECAM */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
			  E_ECAM_CR_ENABLE, E_ECAM_CONTROL);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
			  (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
			  E_ECAM_CONTROL);

	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
			  E_ECAM_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
			  E_ECAM_BASE_HI);

	/* Get bus range */
	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
	pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
	/* Write primary, secondary and subordinate bus numbers */
	ecam_val = first_busno;
	ecam_val |= (first_busno + 1) << 8;
	ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
	writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));

	if (nwl_pcie_link_up(pcie))
		dev_info(dev, "Link is UP\n");
	else
		dev_info(dev, "Link is DOWN\n");

	/* Get misc IRQ number */
	pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
	if (pcie->irq_misc < 0)
		return -EINVAL;

	err = devm_request_irq(dev, pcie->irq_misc,
			       nwl_pcie_misc_handler, IRQF_SHARED,
			       "nwl_pcie:misc", pcie);
	if (err) {
		dev_err(dev, "failed to register misc IRQ#%d\n",
			pcie->irq_misc);
		return err;
	}

	/* Disable all misc interrupts */
	nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

	/* Clear pending misc interrupts */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
			  MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);

	/* Enable all misc interrupts */
	nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

	/* Disable all legacy interrupts */
	nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

	/* Clear pending legacy interrupts */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
			  MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);

	/* Enable all legacy interrupts */
	nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

	/* Enable the bridge config interrupt */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
			  BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);

	return 0;
}

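/*
 * Map the "breg", "pcireg" and "cfg" register resources from DT and
 * install the chained handler for the INTx interrupt.
 */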
static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
			     struct platform_device *pdev)
{
	struct device *dev = pcie->dev;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
	pcie->breg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->breg_base))
		return PTR_ERR(pcie->breg_base);
	pcie->phys_breg_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
	pcie->pcireg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->pcireg_base))
		return PTR_ERR(pcie->pcireg_base);
	pcie->phys_pcie_reg_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->ecam_base))
		return PTR_ERR(pcie->ecam_base);
	pcie->phys_ecam_base = res->start;

	/* Get intx IRQ number */
	pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
	if (pcie->irq_intx < 0)
		return pcie->irq_intx;

	irq_set_chained_handler_and_data(pcie->irq_intx,
					 nwl_pcie_leg_handler, pcie);

	return 0;
}

static const struct of_device_id nwl_pcie_of_match[] = {
	{ .compatible = "xlnx,nwl-pcie-2.11", },
	{}
};

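/*
 * Probe: parse DT, enable the reference clock, initialize the bridge and
 * IRQ domains, optionally enable MSI, then register the host bridge.
 */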
static int nwl_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct nwl_pcie *pcie;
	struct pci_host_bridge *bridge;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENODEV;

	pcie = pci_host_bridge_priv(bridge);

	pcie->dev = dev;
	pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;

	err = nwl_pcie_parse_dt(pcie, pdev);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	pcie->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(pcie->clk))
		return PTR_ERR(pcie->clk);

	err = clk_prepare_enable(pcie->clk);
	if (err) {
		dev_err(dev, "can't enable PCIe ref clock\n");
		return err;
	}

	err = nwl_pcie_bridge_init(pcie);
	if (err) {
		dev_err(dev, "HW Initialization failed\n");
		return err;
	}

	err = nwl_pcie_init_irq_domain(pcie);
	if (err) {
		dev_err(dev, "Failed creating IRQ Domain\n");
		return err;
	}

	bridge->sysdata = pcie;
	bridge->ops = &nwl_pcie_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = nwl_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev, "failed to enable MSI support: %d\n", err);
			return err;
		}
	}

	return pci_host_probe(bridge);
}

static struct platform_driver nwl_pcie_driver = {
	.driver = {
		.name = "nwl-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = nwl_pcie_of_match,
	},
	.probe = nwl_pcie_probe,
};
builtin_platform_driver(nwl_pcie_driver);