// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Mobiveil PCIe Host controller
 *
 * Copyright (c) 2018 Mobiveil Inc.
 * Copyright 2019-2020 NXP
 *
 * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
 *	   Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "pcie-mobiveil.h"

static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
	/* Only one device down on each root port */
	if (pci_is_root_bus(bus) && (devfn > 0))
		return false;

	/*
	 * Do not read more than one device on the bus directly
	 * attached to RC
	 */
	if ((bus->primary == to_pci_host_bridge(bus->bridge)->busnr) &&
	    (PCI_SLOT(devfn) > 0))
		return false;

	return true;
}

/*
 * mobiveil_pcie_map_bus - routine to get the configuration base of either
 * root port or endpoint
 */
static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct mobiveil_pcie *pcie = bus->sysdata;
	struct mobiveil_root_port *rp = &pcie->rp;
	u32 value;

	if (!mobiveil_pcie_valid_device(bus, devfn))
		return NULL;

	/* RC config access */
	if (pci_is_root_bus(bus))
		return pcie->csr_axi_slave_base + where;

	/*
	 * EP config access (in Config/APIO space)
	 * Program PEX Address base (31..16 bits) with appropriate value
	 * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register.
	 * Relies on pci_lock serialization
	 */
	value = bus->number << PAB_BUS_SHIFT |
		PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
		PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;

	mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));

	return rp->config_axi_slave_base + where;
}

static struct pci_ops mobiveil_pcie_ops = {
	.map_bus = mobiveil_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static void mobiveil_pcie_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc);
	struct device *dev = &pcie->pdev->dev;
	struct mobiveil_root_port *rp = &pcie->rp;
	struct mobiveil_msi *msi = &rp->msi;
	u32 msi_data, msi_addr_lo, msi_addr_hi;
	u32 intr_status, msi_status;
	unsigned long shifted_status;
	u32 bit, virq, val, mask;

	/*
	 * The core provides a single interrupt for both INTx/MSI messages.
	 * So we'll read both INTx and MSI status.
	 */

	chained_irq_enter(chip, desc);

	/* read INTx status */
	val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
	mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	intr_status = val & mask;

	/* Handle INTx */
	if (intr_status & PAB_INTP_INTX_MASK) {
		shifted_status = mobiveil_csr_readl(pcie,
						    PAB_INTP_AMBA_MISC_STAT);
		shifted_status &= PAB_INTP_INTX_MASK;
		shifted_status >>= PAB_INTX_START;
		do {
			for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) {
				virq = irq_find_mapping(rp->intx_domain,
							bit + 1);
				if (virq)
					generic_handle_irq(virq);
				else
					dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n",
							    bit);

				/* clear interrupt handled */
				mobiveil_csr_writel(pcie,
						    1 << (PAB_INTX_START + bit),
						    PAB_INTP_AMBA_MISC_STAT);
			}

			shifted_status = mobiveil_csr_readl(pcie,
							    PAB_INTP_AMBA_MISC_STAT);
			shifted_status &= PAB_INTP_INTX_MASK;
			shifted_status >>= PAB_INTX_START;
		} while (shifted_status != 0);
	}

	/* read extra MSI status register */
	msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET);

	/* handle MSI interrupts */
	while (msi_status & 1) {
		msi_data = readl_relaxed(pcie->apb_csr_base + MSI_DATA_OFFSET);
		/*
		 * The MSI_STATUS_OFFSET register only clears to zero
		 * once both the MSI data and the address have been
		 * popped from the MSI hardware FIFO, hence the two
		 * dummy reads below.
		 */
		msi_addr_lo = readl_relaxed(pcie->apb_csr_base +
					    MSI_ADDR_L_OFFSET);
		msi_addr_hi = readl_relaxed(pcie->apb_csr_base +
					    MSI_ADDR_H_OFFSET);
		dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n",
			msi_data, msi_addr_hi, msi_addr_lo);

		virq = irq_find_mapping(msi->dev_domain, msi_data);
		if (virq)
			generic_handle_irq(virq);

		msi_status = readl_relaxed(pcie->apb_csr_base +
					   MSI_STATUS_OFFSET);
	}

	/* Clear the interrupt status */
	mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
	chained_irq_exit(chip, desc);
}

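/*
 * mobiveil_pcie_parse_dt - map the "config_axi_slave" and "csr_axi_slave"
 * register spaces and read the optional "apio-wins"/"ppio-wins" window
 * counts from the device tree, falling back to MAX_PIO_WINDOWS.
 */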
static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct platform_device *pdev = pcie->pdev;
	struct device_node *node = dev->of_node;
	struct mobiveil_root_port *rp = &pcie->rp;
	struct resource *res;

	/* map config resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "config_axi_slave");
	rp->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rp->config_axi_slave_base))
		return PTR_ERR(rp->config_axi_slave_base);
	rp->ob_io_res = res;

	/* map csr resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "csr_axi_slave");
	pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->csr_axi_slave_base))
		return PTR_ERR(pcie->csr_axi_slave_base);
	pcie->pcie_reg_base = res->start;

	/* read the number of windows requested */
	if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins))
		pcie->apio_wins = MAX_PIO_WINDOWS;

	if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins))
		pcie->ppio_wins = MAX_PIO_WINDOWS;

	return 0;
}

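/*
 * mobiveil_pcie_enable_msi - set up the controller's MSI capture logic.
 * Inbound MSI writes are captured in a 4KB window based at the CSR
 * resource address (pcie_reg_base) and queued in the MSI hardware FIFO
 * that mobiveil_pcie_isr() drains.
 */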
static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie)
{
	phys_addr_t msg_addr = pcie->pcie_reg_base;
	struct mobiveil_msi *msi = &pcie->rp.msi;

	msi->num_of_vectors = PCI_NUM_MSI;
	msi->msi_pages_phys = (phys_addr_t)msg_addr;

	writel_relaxed(lower_32_bits(msg_addr),
		       pcie->apb_csr_base + MSI_BASE_LO_OFFSET);
	writel_relaxed(upper_32_bits(msg_addr),
		       pcie->apb_csr_base + MSI_BASE_HI_OFFSET);
	writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET);
	writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET);
}

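/*
 * mobiveil_host_init - bring the PAB into a state where config accesses
 * can be issued: program bus numbers, enable AXI/PEX PIO, set up the
 * config outbound window and the default inbound window, then program
 * one outbound window per "ranges" entry from the device tree.
 */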
int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
{
	struct mobiveil_root_port *rp = &pcie->rp;
	struct pci_host_bridge *bridge = rp->bridge;
	u32 value, pab_ctrl, type;
	struct resource_entry *win;

	pcie->ib_wins_configured = 0;
	pcie->ob_wins_configured = 0;

	if (!reinit) {
		/* setup bus numbers */
		value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
		value &= 0xff000000;
		value |= 0x00ff0100;
		mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
	}

	/*
	 * program Bus Master Enable Bit in Command Register in PAB Config
	 * Space
	 */
	value = mobiveil_csr_readl(pcie, PCI_COMMAND);
	value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	mobiveil_csr_writel(pcie, value, PCI_COMMAND);

	/*
	 * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
	 * register
	 */
	pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
	pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
	mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);

	/*
	 * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
	 * PAB_AXI_PIO_CTRL Register
	 */
	value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
	value |= APIO_EN_MASK;
	mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);

	/* Enable PCIe PIO master */
	value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
	value |= 1 << PIO_ENABLE_SHIFT;
	mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);

	/*
	 * We'll program one outbound window for config reads and
	 * another default inbound window for all the upstream traffic;
	 * the rest of the outbound windows will be configured according
	 * to the "ranges" property defined in the device tree.
	 */

	/* config outbound translation window */
	program_ob_windows(pcie, WIN_NUM_0, rp->ob_io_res->start, 0,
			   CFG_WINDOW_TYPE, resource_size(rp->ob_io_res));

	/* memory inbound translation window */
	program_ib_windows(pcie, WIN_NUM_0, 0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		if (resource_type(win->res) == IORESOURCE_MEM)
			type = MEM_WINDOW_TYPE;
		else if (resource_type(win->res) == IORESOURCE_IO)
			type = IO_WINDOW_TYPE;
		else
			continue;

		/* configure outbound translation window */
		program_ob_windows(pcie, pcie->ob_wins_configured,
				   win->res->start,
				   win->res->start - win->offset,
				   type, resource_size(win->res));
	}

	/* fixup for PCIe class register */
	value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
	value &= 0xff;
	value |= (PCI_CLASS_BRIDGE_PCI << 16);
	mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);

	return 0;
}

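/* Mask an INTx interrupt by clearing its bit in PAB_INTP_AMBA_MISC_ENB */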
static void mobiveil_mask_intx_irq(struct irq_data *data)
{
	struct irq_desc *desc = irq_to_desc(data->irq);
	struct mobiveil_pcie *pcie;
	struct mobiveil_root_port *rp;
	unsigned long flags;
	u32 mask, shifted_val;

	pcie = irq_desc_get_chip_data(desc);
	rp = &pcie->rp;
	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
	raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
	shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	shifted_val &= ~mask;
	mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
	raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}

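/* Unmask an INTx interrupt by setting its bit in PAB_INTP_AMBA_MISC_ENB */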
static void mobiveil_unmask_intx_irq(struct irq_data *data)
{
	struct irq_desc *desc = irq_to_desc(data->irq);
	struct mobiveil_pcie *pcie;
	struct mobiveil_root_port *rp;
	unsigned long flags;
	u32 shifted_val, mask;

	pcie = irq_desc_get_chip_data(desc);
	rp = &pcie->rp;
	mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
	raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
	shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
	shifted_val |= mask;
	mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
	raw_spin_unlock_irqrestore(&rp->intx_mask_lock, flags);
}

static struct irq_chip intx_irq_chip = {
	.name = "mobiveil_pcie:intx",
	.irq_enable = mobiveil_unmask_intx_irq,
	.irq_disable = mobiveil_mask_intx_irq,
	.irq_mask = mobiveil_mask_intx_irq,
	.irq_unmask = mobiveil_unmask_intx_irq,
};

/* routine to setup the INTx related data */
static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

/* INTx domain operations structure */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mobiveil_pcie_intx_map,
};

static struct irq_chip mobiveil_msi_irq_chip = {
	.name = "Mobiveil PCIe MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info mobiveil_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &mobiveil_msi_irq_chip,
};

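/*
 * Compose the MSI message for a given hwirq: each vector gets a unique
 * target address inside the captured MSI window, and the vector number
 * itself is used as the payload.
 */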
static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int));

	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data = data->hwirq;

	dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)data->hwirq, msg->address_hi, msg->address_lo);
}

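/*
 * All MSIs arrive through the single chained controller IRQ, so
 * per-vector affinity steering is not supported.
 */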
static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
				     const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip mobiveil_msi_bottom_irq_chip = {
	.name			= "Mobiveil MSI",
	.irq_compose_msi_msg	= mobiveil_compose_msi_msg,
	.irq_set_affinity	= mobiveil_msi_set_affinity,
};

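/* Allocate the first free MSI vector from the bitmap, one IRQ at a time */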
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
					 unsigned int virq,
					 unsigned int nr_irqs, void *args)
{
	struct mobiveil_pcie *pcie = domain->host_data;
	struct mobiveil_msi *msi = &pcie->rp.msi;
	unsigned long bit;

	WARN_ON(nr_irqs != 1);
	mutex_lock(&msi->lock);

	bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors);
	if (bit >= msi->num_of_vectors) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	set_bit(bit, msi->msi_irq_in_use);

	mutex_unlock(&msi->lock);

	irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip,
			    domain->host_data, handle_level_irq, NULL, NULL);
	return 0;
}

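/* Return an MSI vector to the bitmap; complain if it was not in use */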
static void mobiveil_irq_msi_domain_free(struct irq_domain *domain,
					 unsigned int virq,
					 unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d);
	struct mobiveil_msi *msi = &pcie->rp.msi;

	mutex_lock(&msi->lock);

	if (!test_bit(d->hwirq, msi->msi_irq_in_use))
		dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n",
			d->hwirq);
	else
		__clear_bit(d->hwirq, msi->msi_irq_in_use);

	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= mobiveil_irq_msi_domain_alloc,
	.free	= mobiveil_irq_msi_domain_free,
};

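/*
 * Create the two-level MSI domain hierarchy: an inner domain that hands
 * out hardware vectors and a PCI MSI domain stacked on top of it.
 */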
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct mobiveil_msi *msi = &pcie->rp.msi;

	mutex_init(&msi->lock);
	msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
						&msi_domain_ops, pcie);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &mobiveil_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}

	return 0;
}

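/* Set up the INTx IRQ domain, then the MSI domains */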
static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
	struct device *dev = &pcie->pdev->dev;
	struct device_node *node = dev->of_node;
	struct mobiveil_root_port *rp = &pcie->rp;

	/* setup INTx */
	rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
						&intx_domain_ops, pcie);

	if (!rp->intx_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENOMEM;
	}

	raw_spin_lock_init(&rp->intx_mask_lock);

	/* setup MSI */
	return mobiveil_allocate_msi_domains(pcie);
}

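/*
 * Default interrupt bring-up for controllers using the integrated
 * MSI/INTx logic: map the APB CSR space, enable MSI capture, create the
 * IRQ domains and chain the controller IRQ to mobiveil_pcie_isr().
 */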
static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie)
{
	struct platform_device *pdev = pcie->pdev;
	struct device *dev = &pdev->dev;
	struct mobiveil_root_port *rp = &pcie->rp;
	struct resource *res;
	int ret;

	/* map MSI config resource */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr");
	pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->apb_csr_base))
		return PTR_ERR(pcie->apb_csr_base);

	/* setup MSI hardware registers */
	mobiveil_pcie_enable_msi(pcie);

	rp->irq = platform_get_irq(pdev, 0);
	if (rp->irq < 0)
		return rp->irq;

	/* initialize the IRQ domains */
	ret = mobiveil_pcie_init_irq_domain(pcie);
	if (ret) {
		dev_err(dev, "Failed creating IRQ Domain\n");
		return ret;
	}

	irq_set_chained_handler_and_data(rp->irq, mobiveil_pcie_isr, pcie);

	/* Enable interrupts */
	mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
			    PAB_INTP_AMBA_MISC_ENB);

	return 0;
}

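/* Let a platform-specific hook override the integrated interrupt setup */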
static int mobiveil_pcie_interrupt_init(struct mobiveil_pcie *pcie)
{
	struct mobiveil_root_port *rp = &pcie->rp;

	if (rp->ops->interrupt_init)
		return rp->ops->interrupt_init(pcie);

	return mobiveil_pcie_integrated_interrupt_init(pcie);
}

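/* Check that the controller's config header identifies it as a PCI bridge */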
static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie)
{
	u32 header_type;

	header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE);
	header_type &= 0x7f;

	return header_type == PCI_HEADER_TYPE_BRIDGE;
}

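/*
 * mobiveil_pcie_host_probe - top-level host bring-up: parse the DT,
 * initialize the host bridge, set up interrupts, train the link and
 * hand the bridge over to the PCI core.
 */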
int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie)
{
	struct mobiveil_root_port *rp = &pcie->rp;
	struct pci_host_bridge *bridge = rp->bridge;
	struct device *dev = &pcie->pdev->dev;
	int ret;

	ret = mobiveil_pcie_parse_dt(pcie);
	if (ret) {
		dev_err(dev, "Parsing DT failed, ret: %x\n", ret);
		return ret;
	}

	if (!mobiveil_pcie_is_bridge(pcie))
		return -ENODEV;

	/*
	 * configure all inbound and outbound windows and prepare the RC for
	 * config access
	 */
	ret = mobiveil_host_init(pcie, false);
	if (ret) {
		dev_err(dev, "Failed to initialize host\n");
		return ret;
	}

	ret = mobiveil_pcie_interrupt_init(pcie);
	if (ret) {
		dev_err(dev, "Interrupt init failed\n");
		return ret;
	}

	/* Initialize bridge */
	bridge->sysdata = pcie;
	bridge->ops = &mobiveil_pcie_ops;

	ret = mobiveil_bringup_link(pcie);
	if (ret) {
		dev_info(dev, "link bring-up failed\n");
		return ret;
	}

	return pci_host_probe(bridge);
}
601