1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * PCIe driver for Marvell Armada 370 and Armada XP SoCs
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/kernel.h>
9*4882a593Smuzhiyun #include <linux/pci.h>
10*4882a593Smuzhiyun #include <linux/clk.h>
11*4882a593Smuzhiyun #include <linux/delay.h>
12*4882a593Smuzhiyun #include <linux/gpio.h>
13*4882a593Smuzhiyun #include <linux/init.h>
14*4882a593Smuzhiyun #include <linux/mbus.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/platform_device.h>
17*4882a593Smuzhiyun #include <linux/of_address.h>
18*4882a593Smuzhiyun #include <linux/of_irq.h>
19*4882a593Smuzhiyun #include <linux/of_gpio.h>
20*4882a593Smuzhiyun #include <linux/of_pci.h>
21*4882a593Smuzhiyun #include <linux/of_platform.h>
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun #include "../pci.h"
24*4882a593Smuzhiyun #include "../pci-bridge-emul.h"
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun /*
27*4882a593Smuzhiyun * PCIe unit register offsets.
28*4882a593Smuzhiyun */
29*4882a593Smuzhiyun #define PCIE_DEV_ID_OFF 0x0000
30*4882a593Smuzhiyun #define PCIE_CMD_OFF 0x0004
31*4882a593Smuzhiyun #define PCIE_DEV_REV_OFF 0x0008
32*4882a593Smuzhiyun #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
33*4882a593Smuzhiyun #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
34*4882a593Smuzhiyun #define PCIE_CAP_PCIEXP 0x0060
35*4882a593Smuzhiyun #define PCIE_HEADER_LOG_4_OFF 0x0128
36*4882a593Smuzhiyun #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
37*4882a593Smuzhiyun #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
38*4882a593Smuzhiyun #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
39*4882a593Smuzhiyun #define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
40*4882a593Smuzhiyun #define PCIE_WIN5_CTRL_OFF 0x1880
41*4882a593Smuzhiyun #define PCIE_WIN5_BASE_OFF 0x1884
42*4882a593Smuzhiyun #define PCIE_WIN5_REMAP_OFF 0x188c
43*4882a593Smuzhiyun #define PCIE_CONF_ADDR_OFF 0x18f8
44*4882a593Smuzhiyun #define PCIE_CONF_ADDR_EN 0x80000000
45*4882a593Smuzhiyun #define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
46*4882a593Smuzhiyun #define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
47*4882a593Smuzhiyun #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
48*4882a593Smuzhiyun #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
49*4882a593Smuzhiyun #define PCIE_CONF_ADDR(bus, devfn, where) \
50*4882a593Smuzhiyun (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
51*4882a593Smuzhiyun PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
52*4882a593Smuzhiyun PCIE_CONF_ADDR_EN)
53*4882a593Smuzhiyun #define PCIE_CONF_DATA_OFF 0x18fc
54*4882a593Smuzhiyun #define PCIE_MASK_OFF 0x1910
55*4882a593Smuzhiyun #define PCIE_MASK_ENABLE_INTS 0x0f000000
56*4882a593Smuzhiyun #define PCIE_CTRL_OFF 0x1a00
57*4882a593Smuzhiyun #define PCIE_CTRL_X1_MODE 0x0001
58*4882a593Smuzhiyun #define PCIE_STAT_OFF 0x1a04
59*4882a593Smuzhiyun #define PCIE_STAT_BUS 0xff00
60*4882a593Smuzhiyun #define PCIE_STAT_DEV 0x1f0000
61*4882a593Smuzhiyun #define PCIE_STAT_LINK_DOWN BIT(0)
62*4882a593Smuzhiyun #define PCIE_RC_RTSTA 0x1a14
63*4882a593Smuzhiyun #define PCIE_DEBUG_CTRL 0x1a60
64*4882a593Smuzhiyun #define PCIE_DEBUG_SOFT_RESET BIT(20)
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun struct mvebu_pcie_port;
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /* Structure representing all PCIe interfaces */
69*4882a593Smuzhiyun struct mvebu_pcie {
70*4882a593Smuzhiyun struct platform_device *pdev;
71*4882a593Smuzhiyun struct mvebu_pcie_port *ports;
72*4882a593Smuzhiyun struct resource io;
73*4882a593Smuzhiyun struct resource realio;
74*4882a593Smuzhiyun struct resource mem;
75*4882a593Smuzhiyun struct resource busn;
76*4882a593Smuzhiyun int nports;
77*4882a593Smuzhiyun };
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun struct mvebu_pcie_window {
80*4882a593Smuzhiyun phys_addr_t base;
81*4882a593Smuzhiyun phys_addr_t remap;
82*4882a593Smuzhiyun size_t size;
83*4882a593Smuzhiyun };
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun /* Structure representing one PCIe interface */
86*4882a593Smuzhiyun struct mvebu_pcie_port {
87*4882a593Smuzhiyun char *name;
88*4882a593Smuzhiyun void __iomem *base;
89*4882a593Smuzhiyun u32 port;
90*4882a593Smuzhiyun u32 lane;
91*4882a593Smuzhiyun int devfn;
92*4882a593Smuzhiyun unsigned int mem_target;
93*4882a593Smuzhiyun unsigned int mem_attr;
94*4882a593Smuzhiyun unsigned int io_target;
95*4882a593Smuzhiyun unsigned int io_attr;
96*4882a593Smuzhiyun struct clk *clk;
97*4882a593Smuzhiyun struct gpio_desc *reset_gpio;
98*4882a593Smuzhiyun char *reset_name;
99*4882a593Smuzhiyun struct pci_bridge_emul bridge;
100*4882a593Smuzhiyun struct device_node *dn;
101*4882a593Smuzhiyun struct mvebu_pcie *pcie;
102*4882a593Smuzhiyun struct mvebu_pcie_window memwin;
103*4882a593Smuzhiyun struct mvebu_pcie_window iowin;
104*4882a593Smuzhiyun u32 saved_pcie_stat;
105*4882a593Smuzhiyun struct resource regs;
106*4882a593Smuzhiyun };
107*4882a593Smuzhiyun
/* Write a 32-bit value to a PCIe-unit register of @port. */
static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}
112*4882a593Smuzhiyun
/* Read a 32-bit PCIe-unit register of @port. */
static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}
117*4882a593Smuzhiyun
/*
 * True when the port has a usable I/O window; both target and attribute
 * are set to -1 (all-ones after the implicit unsigned conversion) when
 * the device tree provides no I/O range for this port.
 */
static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
{
	return port->io_target != -1 && port->io_attr != -1;
}
122*4882a593Smuzhiyun
/* Link is up when the LINK_DOWN bit of the status register is clear. */
static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}
127*4882a593Smuzhiyun
mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port * port,int nr)128*4882a593Smuzhiyun static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun u32 stat;
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun stat = mvebu_readl(port, PCIE_STAT_OFF);
133*4882a593Smuzhiyun stat &= ~PCIE_STAT_BUS;
134*4882a593Smuzhiyun stat |= nr << 8;
135*4882a593Smuzhiyun mvebu_writel(port, stat, PCIE_STAT_OFF);
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun
mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port * port,int nr)138*4882a593Smuzhiyun static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
139*4882a593Smuzhiyun {
140*4882a593Smuzhiyun u32 stat;
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun stat = mvebu_readl(port, PCIE_STAT_OFF);
143*4882a593Smuzhiyun stat &= ~PCIE_STAT_DEV;
144*4882a593Smuzhiyun stat |= nr << 16;
145*4882a593Smuzhiyun mvebu_writel(port, stat, PCIE_STAT_OFF);
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun /*
149*4882a593Smuzhiyun * Setup PCIE BARs and Address Decode Wins:
150*4882a593Smuzhiyun * BAR[0] -> internal registers (needed for MSI)
151*4882a593Smuzhiyun * BAR[1] -> covers all DRAM banks
152*4882a593Smuzhiyun * BAR[2] -> Disabled
153*4882a593Smuzhiyun * WIN[0-3] -> DRAM bank[0-3]
154*4882a593Smuzhiyun */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	for (i = 1; i < 3; i++) {
		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
	}

	for (i = 0; i < 5; i++) {
		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
	}

	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		/* Base and size use 64 KiB granularity; the control
		 * register packs size, attribute, target id and enable.
		 */
		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/*
	 * Round up 'size' to the nearest power of two.
	 * NOTE(review): assumes total DRAM fits in 32 bits and is below
	 * 2 GiB when not already a power of two — otherwise
	 * 1 << fls(size) would overflow the u32. Confirm on
	 * large-memory configurations.
	 */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));

	/*
	 * Point BAR[0] to the device's internal registers (needed for MSI).
	 * The register block is aligned down to a 1 MiB boundary.
	 */
	mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
}
213*4882a593Smuzhiyun
mvebu_pcie_setup_hw(struct mvebu_pcie_port * port)214*4882a593Smuzhiyun static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
215*4882a593Smuzhiyun {
216*4882a593Smuzhiyun u32 cmd, mask;
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun /* Point PCIe unit MBUS decode windows to DRAM space. */
219*4882a593Smuzhiyun mvebu_pcie_setup_wins(port);
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun /* Master + slave enable. */
222*4882a593Smuzhiyun cmd = mvebu_readl(port, PCIE_CMD_OFF);
223*4882a593Smuzhiyun cmd |= PCI_COMMAND_IO;
224*4882a593Smuzhiyun cmd |= PCI_COMMAND_MEMORY;
225*4882a593Smuzhiyun cmd |= PCI_COMMAND_MASTER;
226*4882a593Smuzhiyun mvebu_writel(port, cmd, PCIE_CMD_OFF);
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun /* Enable interrupt lines A-D. */
229*4882a593Smuzhiyun mask = mvebu_readl(port, PCIE_MASK_OFF);
230*4882a593Smuzhiyun mask |= PCIE_MASK_ENABLE_INTS;
231*4882a593Smuzhiyun mvebu_writel(port, mask, PCIE_MASK_OFF);
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun
/*
 * Read @size bytes of config space at @where for @devfn on @bus through
 * the indirect CONF_ADDR/CONF_DATA register pair. Returns a PCIBIOS_*
 * code; *val is set to all-ones on failure, as the PCI core expects.
 */
static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch the target bus/devfn/register in the address register. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	default:
		/*
		 * Reject unsupported access sizes instead of leaving *val
		 * uninitialized and reporting success; this mirrors the
		 * default case in mvebu_pcie_hw_wr_conf().
		 */
		*val = 0xffffffff;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
257*4882a593Smuzhiyun
/*
 * Write @size bytes of config space at @where for @devfn on @bus through
 * the indirect CONF_ADDR/CONF_DATA register pair. Returns a PCIBIOS_*
 * code; unsupported sizes yield PCIBIOS_BAD_REGISTER_NUMBER.
 */
static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	/* Latch the target bus/devfn/register in the address register. */
	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun /*
285*4882a593Smuzhiyun * Remove windows, starting from the largest ones to the smallest
286*4882a593Smuzhiyun * ones.
287*4882a593Smuzhiyun */
/*
 * Remove windows, starting from the largest ones to the smallest
 * ones.
 */
static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
				   phys_addr_t base, size_t size)
{
	while (size) {
		/* Peel off the largest power-of-two chunk remaining. */
		size_t chunk = 1 << (fls(size) - 1);

		mvebu_mbus_del_window(base, chunk);
		base += chunk;
		size -= chunk;
	}
}
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun /*
301*4882a593Smuzhiyun * MBus windows can only have a power of two size, but PCI BARs do not
302*4882a593Smuzhiyun * have this constraint. Therefore, we have to split the PCI BAR into
303*4882a593Smuzhiyun * areas each having a power of two size. We start from the largest
304*4882a593Smuzhiyun * one (i.e highest order bit set in the size).
305*4882a593Smuzhiyun */
/*
 * MBus windows can only have a power of two size, but PCI BARs do not
 * have this constraint. Therefore, we have to split the PCI BAR into
 * areas each having a power of two size. We start from the largest
 * one (i.e highest order bit set in the size).
 *
 * On failure, every window created so far is torn down again, so the
 * operation is all-or-nothing from the caller's point of view.
 */
static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				   unsigned int target, unsigned int attribute,
				   phys_addr_t base, size_t size,
				   phys_addr_t remap)
{
	/* Total amount mapped so far, used for unwinding on error. */
	size_t size_mapped = 0;

	while (size) {
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			/* Roll back the windows created before this one. */
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		/* Keep the remap target in lock-step with the base. */
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}
}
337*4882a593Smuzhiyun
/*
 * Reconfigure one decoding window (I/O or memory): tear down the current
 * window if it differs from @desired, then create the new one. @cur is
 * updated to reflect the window actually in place.
 */
static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
				  unsigned int target, unsigned int attribute,
				  const struct mvebu_pcie_window *desired,
				  struct mvebu_pcie_window *cur)
{
	/* Nothing to do if the window is already as requested. */
	if (desired->base == cur->base && desired->remap == cur->remap &&
	    desired->size == cur->size)
		return;

	if (cur->size != 0) {
		mvebu_pcie_del_windows(port, cur->base, cur->size);
		cur->size = 0;
		cur->base = 0;

		/*
		 * If something tries to change the window while it is enabled
		 * the change will not be done atomically. That would be
		 * difficult to do in the general case.
		 */
	}

	/* A zero-sized desired window means "disable" — we are done. */
	if (desired->size == 0)
		return;

	mvebu_pcie_add_windows(port, target, attribute, desired->base,
			       desired->size, desired->remap);
	*cur = *desired;
}
366*4882a593Smuzhiyun
/*
 * Recompute and apply the I/O decoding window from the emulated
 * bridge's iobase/iolimit registers. Called whenever those registers
 * or the COMMAND I/O-enable bit change.
 */
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	/* Zero-initialized window: used to disable I/O decoding. */
	struct mvebu_pcie_window desired = {};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new iobase/iolimit values invalid? */
	if (conf->iolimit < conf->iobase ||
	    conf->iolimitupper < conf->iobaseupper ||
	    !(conf->command & PCI_COMMAND_IO)) {
		mvebu_pcie_set_window(port, port->io_target, port->io_attr,
				      &desired, &port->iowin);
		return;
	}

	if (!mvebu_has_ioport(port)) {
		dev_WARN(&port->pcie->pdev->dev,
			 "Attempt to set IO when IO is disabled\n");
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin_base
	 * is the CPU address.
	 */
	desired.remap = ((conf->iobase & 0xF0) << 8) |
			(conf->iobaseupper << 16);
	desired.base = port->pcie->io.start + desired.remap;
	desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
			 (conf->iolimitupper << 16)) -
			desired.remap) +
		       1;

	mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
			      &port->iowin);
}
405*4882a593Smuzhiyun
/*
 * Recompute and apply the memory decoding window from the emulated
 * bridge's membase/memlimit registers. Called whenever those registers
 * or the COMMAND memory-enable bit change.
 */
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	/* Memory windows are identity-mapped: no MBus remapping. */
	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
	struct pci_bridge_emul_conf *conf = &port->bridge.conf;

	/* Are the new membase/memlimit values invalid? */
	if (conf->memlimit < conf->membase ||
	    !(conf->command & PCI_COMMAND_MEMORY)) {
		/* Disable the window by applying the zero-sized one. */
		mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
				      &desired, &port->memwin);
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
	desired.base = ((conf->membase & 0xFFF0) << 16);
	desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
		       desired.base + 1;

	mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
			      &port->memwin);
}
432*4882a593Smuzhiyun
/*
 * Read handler for the PCIe capability registers of the emulated
 * bridge. Most values come straight from the hardware's own PCIe
 * capability block at PCIE_CAP_PCIEXP; a few are filtered or
 * synthesized below. Registers not listed fall back to the generic
 * emulation (PCI_BRIDGE_EMUL_NOT_HANDLED).
 */
static pci_bridge_emul_read_status_t
mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
				     int reg, u32 *value)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCI_EXP_DEVCTL:
		/* Hide the error-reporting enables; see the write handler. */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
				 ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
				   PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
		break;

	case PCI_EXP_LNKCAP:
		/*
		 * PCIe requires the clock power management capability to be
		 * hard-wired to zero for downstream ports
		 */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			 ~PCI_EXP_LNKCAP_CLKPM;
		break;

	case PCI_EXP_LNKCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_SLTCTL:
		/* Report "presence detected" in the slot status half. */
		*value = PCI_EXP_SLTSTA_PDS << 16;
		break;

	case PCI_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	default:
		return PCI_BRIDGE_EMUL_NOT_HANDLED;
	}

	return PCI_BRIDGE_EMUL_HANDLED;
}
477*4882a593Smuzhiyun
/*
 * Write handler for the base (type 1 header) registers of the emulated
 * bridge. Propagates changes to iobase/membase/command into the real
 * MBus decoding windows and the local bus number.
 */
static void
mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;
	struct pci_bridge_emul_conf *conf = &bridge->conf;

	switch (reg) {
	case PCI_COMMAND:
	{
		/* Without an I/O window, the I/O-enable bit is read-only 0. */
		if (!mvebu_has_ioport(port))
			conf->command &= ~PCI_COMMAND_IO;

		/* Only react to bits that actually toggled. */
		if ((old ^ new) & PCI_COMMAND_IO)
			mvebu_pcie_handle_iobase_change(port);
		if ((old ^ new) & PCI_COMMAND_MEMORY)
			mvebu_pcie_handle_membase_change(port);

		break;
	}

	case PCI_IO_BASE:
		/*
		 * We keep bit 1 set, it is a read-only bit that
		 * indicates we support 32 bits addressing for the
		 * I/O
		 */
		conf->iobase |= PCI_IO_RANGE_TYPE_32;
		conf->iolimit |= PCI_IO_RANGE_TYPE_32;
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_MEMORY_BASE:
		mvebu_pcie_handle_membase_change(port);
		break;

	case PCI_IO_BASE_UPPER16:
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_PRIMARY_BUS:
		/*
		 * Writes to this dword also cover the secondary bus number;
		 * mirror it into the hardware's local bus number.
		 */
		mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
		break;

	default:
		break;
	}
}
526*4882a593Smuzhiyun
/*
 * Write handler for the PCIe capability registers of the emulated
 * bridge: forwards writes to the hardware capability block after
 * masking bits that must stay clear in root complex mode.
 */
static void
mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
				      int reg, u32 old, u32 new, u32 mask)
{
	struct mvebu_pcie_port *port = bridge->data;

	switch (reg) {
	case PCI_EXP_DEVCTL:
		/*
		 * Armada370 data says these bits must always
		 * be zero when in root complex mode.
		 */
		new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
			 PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCI_EXP_LNKCTL:
		/*
		 * If we don't support CLKREQ, we must ensure that the
		 * CLKREQ enable bit always reads zero. Since we haven't
		 * had this capability, and it's dependent on board wiring,
		 * disable it for the time being.
		 */
		new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCI_EXP_RTSTA:
		mvebu_writel(port, new, PCIE_RC_RTSTA);
		break;
	}
}
562*4882a593Smuzhiyun
/* Callbacks wiring the generic bridge emulation to this hardware. */
static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
	.write_base = mvebu_pci_bridge_emul_base_conf_write,
	.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
	.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
};
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun /*
570*4882a593Smuzhiyun * Initialize the configuration space of the PCI-to-PCI bridge
571*4882a593Smuzhiyun * associated with the given PCIe interface.
572*4882a593Smuzhiyun */
/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
	struct pci_bridge_emul *bridge = &port->bridge;
	/* Hardware PCIe capability header: version field in bits 19:16. */
	u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
	u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);

	bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
	bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
	bridge->conf.class_revision =
		mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;

	if (mvebu_has_ioport(port)) {
		/* We support 32 bits I/O addressing */
		bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
		bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
	}

	/*
	 * Older mvebu hardware provides PCIe Capability structure only in
	 * version 1. New hardware provides it in version 2.
	 */
	bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver);

	bridge->has_pcie = true;
	bridge->data = port;
	bridge->ops = &mvebu_pci_bridge_emul_ops;

	/*
	 * NOTE(review): pci_bridge_emul_init() can fail (allocation);
	 * its return value is ignored here — confirm the caller tolerates
	 * a partially initialized bridge, or propagate the error.
	 */
	pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
}
602*4882a593Smuzhiyun
/* Retrieve the driver-private mvebu_pcie from the ARM pci_sys_data. */
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
607*4882a593Smuzhiyun
mvebu_pcie_find_port(struct mvebu_pcie * pcie,struct pci_bus * bus,int devfn)608*4882a593Smuzhiyun static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
609*4882a593Smuzhiyun struct pci_bus *bus,
610*4882a593Smuzhiyun int devfn)
611*4882a593Smuzhiyun {
612*4882a593Smuzhiyun int i;
613*4882a593Smuzhiyun
614*4882a593Smuzhiyun for (i = 0; i < pcie->nports; i++) {
615*4882a593Smuzhiyun struct mvebu_pcie_port *port = &pcie->ports[i];
616*4882a593Smuzhiyun
617*4882a593Smuzhiyun if (bus->number == 0 && port->devfn == devfn)
618*4882a593Smuzhiyun return port;
619*4882a593Smuzhiyun if (bus->number != 0 &&
620*4882a593Smuzhiyun bus->number >= port->bridge.conf.secondary_bus &&
621*4882a593Smuzhiyun bus->number <= port->bridge.conf.subordinate_bus)
622*4882a593Smuzhiyun return port;
623*4882a593Smuzhiyun }
624*4882a593Smuzhiyun
625*4882a593Smuzhiyun return NULL;
626*4882a593Smuzhiyun }
627*4882a593Smuzhiyun
628*4882a593Smuzhiyun /* PCI configuration space write function */
/* PCI configuration space write function */
static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			      int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = bus->sysdata;
	struct mvebu_pcie_port *port;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Bus 0 accesses go to the emulated PCI-to-PCI bridge. */
	if (bus->number == 0)
		return pci_bridge_emul_conf_write(&port->bridge, where,
						  size, val);

	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Forward the access to the real PCIe interface. */
	return mvebu_pcie_hw_wr_conf(port, bus, devfn, where, size, val);
}
654*4882a593Smuzhiyun
655*4882a593Smuzhiyun /* PCI configuration space read function */
mvebu_pcie_rd_conf(struct pci_bus * bus,u32 devfn,int where,int size,u32 * val)656*4882a593Smuzhiyun static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
657*4882a593Smuzhiyun int size, u32 *val)
658*4882a593Smuzhiyun {
659*4882a593Smuzhiyun struct mvebu_pcie *pcie = bus->sysdata;
660*4882a593Smuzhiyun struct mvebu_pcie_port *port;
661*4882a593Smuzhiyun int ret;
662*4882a593Smuzhiyun
663*4882a593Smuzhiyun port = mvebu_pcie_find_port(pcie, bus, devfn);
664*4882a593Smuzhiyun if (!port) {
665*4882a593Smuzhiyun *val = 0xffffffff;
666*4882a593Smuzhiyun return PCIBIOS_DEVICE_NOT_FOUND;
667*4882a593Smuzhiyun }
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun /* Access the emulated PCI-to-PCI bridge */
670*4882a593Smuzhiyun if (bus->number == 0)
671*4882a593Smuzhiyun return pci_bridge_emul_conf_read(&port->bridge, where,
672*4882a593Smuzhiyun size, val);
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun if (!mvebu_pcie_link_up(port)) {
675*4882a593Smuzhiyun *val = 0xffffffff;
676*4882a593Smuzhiyun return PCIBIOS_DEVICE_NOT_FOUND;
677*4882a593Smuzhiyun }
678*4882a593Smuzhiyun
679*4882a593Smuzhiyun /* Access the real PCIe interface */
680*4882a593Smuzhiyun ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
681*4882a593Smuzhiyun where, size, val);
682*4882a593Smuzhiyun
683*4882a593Smuzhiyun return ret;
684*4882a593Smuzhiyun }
685*4882a593Smuzhiyun
/* Config accessors: dispatch between the emulated bridges and real hardware. */
static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};
690*4882a593Smuzhiyun
/*
 * Resource alignment hook for BARs of the emulated bridges on bus 0.
 *
 * On the PCI-to-PCI bridge side, the I/O windows must have at least a
 * 64 KB size and the memory windows must have at least a 1 MB size.
 * Moreover, MBus windows need to have a base address aligned on their
 * size, and their size must be a power of two.  This means that if the
 * BAR doesn't have a power of two size, several MBus windows will
 * actually be created.  We need to ensure that the biggest MBus window
 * (which will be the first one) is aligned on its size, which explains
 * the rounddown_pow_of_two() being done here.
 */
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
{
	resource_size_t min_align;

	/* Only the emulated bridges on bus 0 need special treatment. */
	if (dev->bus->number != 0)
		return start;

	if (res->flags & IORESOURCE_IO)
		min_align = SZ_64K;
	else if (res->flags & IORESOURCE_MEM)
		min_align = SZ_1M;
	else
		return start;

	return round_up(start, max_t(resource_size_t, min_align,
				     rounddown_pow_of_two(size)));
}
720*4882a593Smuzhiyun
mvebu_pcie_map_registers(struct platform_device * pdev,struct device_node * np,struct mvebu_pcie_port * port)721*4882a593Smuzhiyun static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
722*4882a593Smuzhiyun struct device_node *np,
723*4882a593Smuzhiyun struct mvebu_pcie_port *port)
724*4882a593Smuzhiyun {
725*4882a593Smuzhiyun int ret = 0;
726*4882a593Smuzhiyun
727*4882a593Smuzhiyun ret = of_address_to_resource(np, 0, &port->regs);
728*4882a593Smuzhiyun if (ret)
729*4882a593Smuzhiyun return (void __iomem *)ERR_PTR(ret);
730*4882a593Smuzhiyun
731*4882a593Smuzhiyun return devm_ioremap_resource(&pdev->dev, &port->regs);
732*4882a593Smuzhiyun }
733*4882a593Smuzhiyun
/*
 * Helpers to decode the PCI DT "ranges" encoding: the space type lives in
 * bits [25:24] of the flags cell, while on mvebu the parent (MBus) address
 * carries the window target and attribute in its two topmost bytes.
 */
#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
#define DT_TYPE_IO 0x1
#define DT_TYPE_MEM32 0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
739*4882a593Smuzhiyun
mvebu_get_tgt_attr(struct device_node * np,int devfn,unsigned long type,unsigned int * tgt,unsigned int * attr)740*4882a593Smuzhiyun static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
741*4882a593Smuzhiyun unsigned long type,
742*4882a593Smuzhiyun unsigned int *tgt,
743*4882a593Smuzhiyun unsigned int *attr)
744*4882a593Smuzhiyun {
745*4882a593Smuzhiyun const int na = 3, ns = 2;
746*4882a593Smuzhiyun const __be32 *range;
747*4882a593Smuzhiyun int rlen, nranges, rangesz, pna, i;
748*4882a593Smuzhiyun
749*4882a593Smuzhiyun *tgt = -1;
750*4882a593Smuzhiyun *attr = -1;
751*4882a593Smuzhiyun
752*4882a593Smuzhiyun range = of_get_property(np, "ranges", &rlen);
753*4882a593Smuzhiyun if (!range)
754*4882a593Smuzhiyun return -EINVAL;
755*4882a593Smuzhiyun
756*4882a593Smuzhiyun pna = of_n_addr_cells(np);
757*4882a593Smuzhiyun rangesz = pna + na + ns;
758*4882a593Smuzhiyun nranges = rlen / sizeof(__be32) / rangesz;
759*4882a593Smuzhiyun
760*4882a593Smuzhiyun for (i = 0; i < nranges; i++, range += rangesz) {
761*4882a593Smuzhiyun u32 flags = of_read_number(range, 1);
762*4882a593Smuzhiyun u32 slot = of_read_number(range + 1, 1);
763*4882a593Smuzhiyun u64 cpuaddr = of_read_number(range + na, pna);
764*4882a593Smuzhiyun unsigned long rtype;
765*4882a593Smuzhiyun
766*4882a593Smuzhiyun if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
767*4882a593Smuzhiyun rtype = IORESOURCE_IO;
768*4882a593Smuzhiyun else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
769*4882a593Smuzhiyun rtype = IORESOURCE_MEM;
770*4882a593Smuzhiyun else
771*4882a593Smuzhiyun continue;
772*4882a593Smuzhiyun
773*4882a593Smuzhiyun if (slot == PCI_SLOT(devfn) && type == rtype) {
774*4882a593Smuzhiyun *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
775*4882a593Smuzhiyun *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
776*4882a593Smuzhiyun return 0;
777*4882a593Smuzhiyun }
778*4882a593Smuzhiyun }
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun return -ENOENT;
781*4882a593Smuzhiyun }
782*4882a593Smuzhiyun
783*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
mvebu_pcie_suspend(struct device * dev)784*4882a593Smuzhiyun static int mvebu_pcie_suspend(struct device *dev)
785*4882a593Smuzhiyun {
786*4882a593Smuzhiyun struct mvebu_pcie *pcie;
787*4882a593Smuzhiyun int i;
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun pcie = dev_get_drvdata(dev);
790*4882a593Smuzhiyun for (i = 0; i < pcie->nports; i++) {
791*4882a593Smuzhiyun struct mvebu_pcie_port *port = pcie->ports + i;
792*4882a593Smuzhiyun port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
793*4882a593Smuzhiyun }
794*4882a593Smuzhiyun
795*4882a593Smuzhiyun return 0;
796*4882a593Smuzhiyun }
797*4882a593Smuzhiyun
mvebu_pcie_resume(struct device * dev)798*4882a593Smuzhiyun static int mvebu_pcie_resume(struct device *dev)
799*4882a593Smuzhiyun {
800*4882a593Smuzhiyun struct mvebu_pcie *pcie;
801*4882a593Smuzhiyun int i;
802*4882a593Smuzhiyun
803*4882a593Smuzhiyun pcie = dev_get_drvdata(dev);
804*4882a593Smuzhiyun for (i = 0; i < pcie->nports; i++) {
805*4882a593Smuzhiyun struct mvebu_pcie_port *port = pcie->ports + i;
806*4882a593Smuzhiyun mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
807*4882a593Smuzhiyun mvebu_pcie_setup_hw(port);
808*4882a593Smuzhiyun }
809*4882a593Smuzhiyun
810*4882a593Smuzhiyun return 0;
811*4882a593Smuzhiyun }
812*4882a593Smuzhiyun #endif
813*4882a593Smuzhiyun
/* devm action: drop the clock reference taken in mvebu_pcie_parse_port(). */
static void mvebu_pcie_port_clk_put(void *data)
{
	struct mvebu_pcie_port *port = data;

	clk_put(port->clk);
}
820*4882a593Smuzhiyun
/*
 * Parse one port child node of the controller.
 *
 * Returns 1 when the port was fully set up, 0 when the node should be
 * skipped (missing/unusable DT resources), or a negative errno on fatal
 * errors (out of memory, probe deferral).  On the skip path the
 * devm-allocated name strings are released again.
 */
static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
				 struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	int reset_gpio, ret;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %pOF, missing pcie-port property\n",
			 child);
		goto skip;
	}

	/* The lane number is optional and defaults to 0. */
	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;

	/* The MBus target/attribute for the memory window is mandatory. */
	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	/* The I/O window only matters when an I/O aperture exists. */
	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%pOF: reset gpio is active low\n",
				 child);
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		/* Request the GPIO with the reset line initially asserted. */
		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	/*
	 * NOTE(review): a clock lookup failing with -EPROBE_DEFER is treated
	 * like any other failure here and the port is skipped instead of the
	 * probe being deferred — confirm this is intentional.
	 */
	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	/* Drop the clock reference automatically on driver detach. */
	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}
928*4882a593Smuzhiyun
929*4882a593Smuzhiyun /*
930*4882a593Smuzhiyun * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
931*4882a593Smuzhiyun * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
932*4882a593Smuzhiyun * of the PCI Express Card Electromechanical Specification, 1.1.
933*4882a593Smuzhiyun */
mvebu_pcie_powerup(struct mvebu_pcie_port * port)934*4882a593Smuzhiyun static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
935*4882a593Smuzhiyun {
936*4882a593Smuzhiyun int ret;
937*4882a593Smuzhiyun
938*4882a593Smuzhiyun ret = clk_prepare_enable(port->clk);
939*4882a593Smuzhiyun if (ret < 0)
940*4882a593Smuzhiyun return ret;
941*4882a593Smuzhiyun
942*4882a593Smuzhiyun if (port->reset_gpio) {
943*4882a593Smuzhiyun u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000;
944*4882a593Smuzhiyun
945*4882a593Smuzhiyun of_property_read_u32(port->dn, "reset-delay-us",
946*4882a593Smuzhiyun &reset_udelay);
947*4882a593Smuzhiyun
948*4882a593Smuzhiyun udelay(100);
949*4882a593Smuzhiyun
950*4882a593Smuzhiyun gpiod_set_value_cansleep(port->reset_gpio, 0);
951*4882a593Smuzhiyun msleep(reset_udelay / 1000);
952*4882a593Smuzhiyun }
953*4882a593Smuzhiyun
954*4882a593Smuzhiyun return 0;
955*4882a593Smuzhiyun }
956*4882a593Smuzhiyun
/*
 * Power down a PCIe port. Strictly, PCIe requires us to place the card
 * in D3hot state before asserting PERST#.
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	/* Assert PERST# (a NULL descriptor makes this a no-op). */
	gpiod_set_value_cansleep(port->reset_gpio, 1);

	clk_disable_unprepare(port->clk);
}
967*4882a593Smuzhiyun
968*4882a593Smuzhiyun /*
969*4882a593Smuzhiyun * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
970*4882a593Smuzhiyun * so we need extra resource setup parsing our special DT properties encoding
971*4882a593Smuzhiyun * the MEM and IO apertures.
972*4882a593Smuzhiyun */
mvebu_pcie_parse_request_resources(struct mvebu_pcie * pcie)973*4882a593Smuzhiyun static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
974*4882a593Smuzhiyun {
975*4882a593Smuzhiyun struct device *dev = &pcie->pdev->dev;
976*4882a593Smuzhiyun struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
977*4882a593Smuzhiyun int ret;
978*4882a593Smuzhiyun
979*4882a593Smuzhiyun /* Get the PCIe memory aperture */
980*4882a593Smuzhiyun mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
981*4882a593Smuzhiyun if (resource_size(&pcie->mem) == 0) {
982*4882a593Smuzhiyun dev_err(dev, "invalid memory aperture size\n");
983*4882a593Smuzhiyun return -EINVAL;
984*4882a593Smuzhiyun }
985*4882a593Smuzhiyun
986*4882a593Smuzhiyun pcie->mem.name = "PCI MEM";
987*4882a593Smuzhiyun pci_add_resource(&bridge->windows, &pcie->mem);
988*4882a593Smuzhiyun ret = devm_request_resource(dev, &iomem_resource, &pcie->mem);
989*4882a593Smuzhiyun if (ret)
990*4882a593Smuzhiyun return ret;
991*4882a593Smuzhiyun
992*4882a593Smuzhiyun /* Get the PCIe IO aperture */
993*4882a593Smuzhiyun mvebu_mbus_get_pcie_io_aperture(&pcie->io);
994*4882a593Smuzhiyun
995*4882a593Smuzhiyun if (resource_size(&pcie->io) != 0) {
996*4882a593Smuzhiyun pcie->realio.flags = pcie->io.flags;
997*4882a593Smuzhiyun pcie->realio.start = PCIBIOS_MIN_IO;
998*4882a593Smuzhiyun pcie->realio.end = min_t(resource_size_t,
999*4882a593Smuzhiyun IO_SPACE_LIMIT - SZ_64K,
1000*4882a593Smuzhiyun resource_size(&pcie->io) - 1);
1001*4882a593Smuzhiyun pcie->realio.name = "PCI I/O";
1002*4882a593Smuzhiyun
1003*4882a593Smuzhiyun pci_add_resource(&bridge->windows, &pcie->realio);
1004*4882a593Smuzhiyun ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
1005*4882a593Smuzhiyun if (ret)
1006*4882a593Smuzhiyun return ret;
1007*4882a593Smuzhiyun }
1008*4882a593Smuzhiyun
1009*4882a593Smuzhiyun return 0;
1010*4882a593Smuzhiyun }
1011*4882a593Smuzhiyun
1012*4882a593Smuzhiyun /*
1013*4882a593Smuzhiyun * This is a copy of pci_host_probe(), except that it does the I/O
1014*4882a593Smuzhiyun * remap as the last step, once we are sure we won't fail.
1015*4882a593Smuzhiyun *
1016*4882a593Smuzhiyun * It should be removed once the I/O remap error handling issue has
1017*4882a593Smuzhiyun * been sorted out.
1018*4882a593Smuzhiyun */
mvebu_pci_host_probe(struct pci_host_bridge * bridge)1019*4882a593Smuzhiyun static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
1020*4882a593Smuzhiyun {
1021*4882a593Smuzhiyun struct mvebu_pcie *pcie;
1022*4882a593Smuzhiyun struct pci_bus *bus, *child;
1023*4882a593Smuzhiyun int ret;
1024*4882a593Smuzhiyun
1025*4882a593Smuzhiyun ret = pci_scan_root_bus_bridge(bridge);
1026*4882a593Smuzhiyun if (ret < 0) {
1027*4882a593Smuzhiyun dev_err(bridge->dev.parent, "Scanning root bridge failed");
1028*4882a593Smuzhiyun return ret;
1029*4882a593Smuzhiyun }
1030*4882a593Smuzhiyun
1031*4882a593Smuzhiyun pcie = pci_host_bridge_priv(bridge);
1032*4882a593Smuzhiyun if (resource_size(&pcie->io) != 0) {
1033*4882a593Smuzhiyun unsigned int i;
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
1036*4882a593Smuzhiyun pci_ioremap_io(i, pcie->io.start + i);
1037*4882a593Smuzhiyun }
1038*4882a593Smuzhiyun
1039*4882a593Smuzhiyun bus = bridge->bus;
1040*4882a593Smuzhiyun
1041*4882a593Smuzhiyun /*
1042*4882a593Smuzhiyun * We insert PCI resources into the iomem_resource and
1043*4882a593Smuzhiyun * ioport_resource trees in either pci_bus_claim_resources()
1044*4882a593Smuzhiyun * or pci_bus_assign_resources().
1045*4882a593Smuzhiyun */
1046*4882a593Smuzhiyun if (pci_has_flag(PCI_PROBE_ONLY)) {
1047*4882a593Smuzhiyun pci_bus_claim_resources(bus);
1048*4882a593Smuzhiyun } else {
1049*4882a593Smuzhiyun pci_bus_size_bridges(bus);
1050*4882a593Smuzhiyun pci_bus_assign_resources(bus);
1051*4882a593Smuzhiyun
1052*4882a593Smuzhiyun list_for_each_entry(child, &bus->children, node)
1053*4882a593Smuzhiyun pcie_bus_configure_settings(child);
1054*4882a593Smuzhiyun }
1055*4882a593Smuzhiyun
1056*4882a593Smuzhiyun pci_bus_add_devices(bus);
1057*4882a593Smuzhiyun return 0;
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun
/*
 * Controller probe: request the MEM/IO apertures, parse every available
 * port child node, power up and map each usable port, then register the
 * PCI host bridge.  Individual ports that fail to power up or map are
 * left disabled rather than failing the whole probe.
 */
static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mvebu_pcie *pcie;
	struct pci_host_bridge *bridge;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int num, i, ret;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	/* Apertures must be known before the ports are parsed. */
	ret = mvebu_pcie_parse_request_resources(pcie);
	if (ret)
		return ret;

	num = of_get_available_child_count(np);

	pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	/* First pass: parse DT; ports returning 0 are skipped silently. */
	i = 0;
	for_each_available_child_of_node(np, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			/* Balance the reference held by the iterator. */
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	/* Second pass: power up and map each successfully parsed port. */
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		child = port->dn;
		if (!child)
			continue;

		/* A port that fails to power up stays disabled. */
		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(dev, "%s: cannot map registers\n", port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		mvebu_pcie_setup_hw(port);
		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_pci_bridge_emul_init(port);
	}

	/* Redundant with the assignment above: i == nports at this point. */
	pcie->nports = i;

	bridge->sysdata = pcie;
	bridge->ops = &mvebu_pcie_ops;
	bridge->align_resource = mvebu_pcie_align_resource;

	return mvebu_pci_host_probe(bridge);
}
1136*4882a593Smuzhiyun
/* SoC families handled by this driver. */
static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,dove-pcie", },
	{ .compatible = "marvell,kirkwood-pcie", },
	{},
};
1144*4882a593Smuzhiyun
/* Port state save/restore runs in the noirq phase of system sleep. */
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
1148*4882a593Smuzhiyun
/* Built-in platform driver: no remove callback, so unbinding is suppressed. */
static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		/* driver unloading/unbinding currently not supported */
		.suppress_bind_attrs = true,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
};
builtin_platform_driver(mvebu_pcie_driver);
1160