1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * PCIe driver for Marvell MVEBU SoCs
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Based on Barebox drivers/pci/pci-mvebu.c
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Ported to U-Boot by:
7*4882a593Smuzhiyun * Anton Schubert <anton.schubert@gmx.de>
8*4882a593Smuzhiyun * Stefan Roese <sr@denx.de>
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * SPDX-License-Identifier: GPL-2.0
11*4882a593Smuzhiyun */
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <common.h>
14*4882a593Smuzhiyun #include <pci.h>
15*4882a593Smuzhiyun #include <linux/errno.h>
16*4882a593Smuzhiyun #include <asm/io.h>
17*4882a593Smuzhiyun #include <asm/arch/cpu.h>
18*4882a593Smuzhiyun #include <asm/arch/soc.h>
19*4882a593Smuzhiyun #include <linux/mbus.h>
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun DECLARE_GLOBAL_DATA_PTR;
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun /* PCIe unit register offsets */
24*4882a593Smuzhiyun #define SELECT(x, n) ((x >> n) & 1UL)
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #define PCIE_DEV_ID_OFF 0x0000
27*4882a593Smuzhiyun #define PCIE_CMD_OFF 0x0004
28*4882a593Smuzhiyun #define PCIE_DEV_REV_OFF 0x0008
29*4882a593Smuzhiyun #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
30*4882a593Smuzhiyun #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
31*4882a593Smuzhiyun #define PCIE_CAPAB_OFF 0x0060
32*4882a593Smuzhiyun #define PCIE_CTRL_STAT_OFF 0x0068
33*4882a593Smuzhiyun #define PCIE_HEADER_LOG_4_OFF 0x0128
34*4882a593Smuzhiyun #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
35*4882a593Smuzhiyun #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
36*4882a593Smuzhiyun #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
37*4882a593Smuzhiyun #define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
38*4882a593Smuzhiyun #define PCIE_WIN5_CTRL_OFF 0x1880
39*4882a593Smuzhiyun #define PCIE_WIN5_BASE_OFF 0x1884
40*4882a593Smuzhiyun #define PCIE_WIN5_REMAP_OFF 0x188c
41*4882a593Smuzhiyun #define PCIE_CONF_ADDR_OFF 0x18f8
42*4882a593Smuzhiyun #define PCIE_CONF_ADDR_EN BIT(31)
43*4882a593Smuzhiyun #define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
44*4882a593Smuzhiyun #define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
45*4882a593Smuzhiyun #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
46*4882a593Smuzhiyun #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
47*4882a593Smuzhiyun #define PCIE_CONF_ADDR(dev, reg) \
48*4882a593Smuzhiyun (PCIE_CONF_BUS(PCI_BUS(dev)) | PCIE_CONF_DEV(PCI_DEV(dev)) | \
49*4882a593Smuzhiyun PCIE_CONF_FUNC(PCI_FUNC(dev)) | PCIE_CONF_REG(reg) | \
50*4882a593Smuzhiyun PCIE_CONF_ADDR_EN)
51*4882a593Smuzhiyun #define PCIE_CONF_DATA_OFF 0x18fc
52*4882a593Smuzhiyun #define PCIE_MASK_OFF 0x1910
53*4882a593Smuzhiyun #define PCIE_MASK_ENABLE_INTS (0xf << 24)
54*4882a593Smuzhiyun #define PCIE_CTRL_OFF 0x1a00
55*4882a593Smuzhiyun #define PCIE_CTRL_X1_MODE BIT(0)
56*4882a593Smuzhiyun #define PCIE_STAT_OFF 0x1a04
57*4882a593Smuzhiyun #define PCIE_STAT_BUS (0xff << 8)
58*4882a593Smuzhiyun #define PCIE_STAT_DEV (0x1f << 16)
59*4882a593Smuzhiyun #define PCIE_STAT_LINK_DOWN BIT(0)
60*4882a593Smuzhiyun #define PCIE_DEBUG_CTRL 0x1a60
61*4882a593Smuzhiyun #define PCIE_DEBUG_SOFT_RESET BIT(20)
62*4882a593Smuzhiyun
/* Simple address range describing one PCIe memory window (inclusive end). */
struct resource {
	u32 start;	/* first address of the range */
	u32 end;	/* last address of the range (inclusive) */
};
67*4882a593Smuzhiyun
/* Per-port driver state for one MVEBU PCIe interface. */
struct mvebu_pcie {
	struct pci_controller hose;	/* U-Boot PCI controller for this port */
	char *name;			/* port name (not set in this file) */
	void __iomem *base;		/* PCIe unit register base */
	void __iomem *membase;		/* memory window base (unused here) */
	struct resource mem;		/* PCI MEM window assigned to this port */
	void __iomem *iobase;		/* I/O window base (unused here) */
	u32 port;			/* PEX port number */
	u32 lane;			/* lane number within the port */
	u32 lane_mask;			/* lane mask (unused in this file) */
	pci_dev_t dev;			/* BDF of the local root port */
};
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun #define to_pcie(_hc) container_of(_hc, struct mvebu_pcie, pci)
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun /*
84*4882a593Smuzhiyun * MVEBU PCIe controller needs MEMORY and I/O BARs to be mapped
85*4882a593Smuzhiyun * into SoCs address space. Each controller will map 32M of MEM
86*4882a593Smuzhiyun * and 64K of I/O space when registered.
87*4882a593Smuzhiyun */
88*4882a593Smuzhiyun static void __iomem *mvebu_pcie_membase = (void __iomem *)MBUS_PCI_MEM_BASE;
89*4882a593Smuzhiyun #define PCIE_MEM_SIZE (32 << 20)
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun #if defined(CONFIG_ARMADA_38X)
92*4882a593Smuzhiyun #define PCIE_BASE(if) \
93*4882a593Smuzhiyun ((if) == 0 ? \
94*4882a593Smuzhiyun MVEBU_REG_PCIE0_BASE : \
95*4882a593Smuzhiyun (MVEBU_REG_PCIE_BASE + 0x4000 * (if - 1)))
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun /*
98*4882a593Smuzhiyun * On A38x MV6820 these PEX ports are supported:
99*4882a593Smuzhiyun * 0 - Port 0.0
100*4882a593Smuzhiyun * 1 - Port 1.0
101*4882a593Smuzhiyun * 2 - Port 2.0
102*4882a593Smuzhiyun * 3 - Port 3.0
103*4882a593Smuzhiyun */
104*4882a593Smuzhiyun #define MAX_PEX 4
105*4882a593Smuzhiyun static struct mvebu_pcie pcie_bus[MAX_PEX];
106*4882a593Smuzhiyun
mvebu_get_port_lane(struct mvebu_pcie * pcie,int pex_idx,int * mem_target,int * mem_attr)107*4882a593Smuzhiyun static void mvebu_get_port_lane(struct mvebu_pcie *pcie, int pex_idx,
108*4882a593Smuzhiyun int *mem_target, int *mem_attr)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun u8 port[] = { 0, 1, 2, 3 };
111*4882a593Smuzhiyun u8 lane[] = { 0, 0, 0, 0 };
112*4882a593Smuzhiyun u8 target[] = { 8, 4, 4, 4 };
113*4882a593Smuzhiyun u8 attr[] = { 0xe8, 0xe8, 0xd8, 0xb8 };
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun pcie->port = port[pex_idx];
116*4882a593Smuzhiyun pcie->lane = lane[pex_idx];
117*4882a593Smuzhiyun *mem_target = target[pex_idx];
118*4882a593Smuzhiyun *mem_attr = attr[pex_idx];
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun #else
121*4882a593Smuzhiyun #define PCIE_BASE(if) \
122*4882a593Smuzhiyun ((if) < 8 ? \
123*4882a593Smuzhiyun (MVEBU_REG_PCIE_BASE + ((if) / 4) * 0x40000 + ((if) % 4) * 0x4000) : \
124*4882a593Smuzhiyun (MVEBU_REG_PCIE_BASE + 0x2000 + ((if) % 8) * 0x40000))
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun /*
127*4882a593Smuzhiyun * On AXP MV78460 these PEX ports are supported:
128*4882a593Smuzhiyun * 0 - Port 0.0
129*4882a593Smuzhiyun * 1 - Port 0.1
130*4882a593Smuzhiyun * 2 - Port 0.2
131*4882a593Smuzhiyun * 3 - Port 0.3
132*4882a593Smuzhiyun * 4 - Port 1.0
133*4882a593Smuzhiyun * 5 - Port 1.1
134*4882a593Smuzhiyun * 6 - Port 1.2
135*4882a593Smuzhiyun * 7 - Port 1.3
136*4882a593Smuzhiyun * 8 - Port 2.0
137*4882a593Smuzhiyun * 9 - Port 3.0
138*4882a593Smuzhiyun */
139*4882a593Smuzhiyun #define MAX_PEX 10
140*4882a593Smuzhiyun static struct mvebu_pcie pcie_bus[MAX_PEX];
141*4882a593Smuzhiyun
mvebu_get_port_lane(struct mvebu_pcie * pcie,int pex_idx,int * mem_target,int * mem_attr)142*4882a593Smuzhiyun static void mvebu_get_port_lane(struct mvebu_pcie *pcie, int pex_idx,
143*4882a593Smuzhiyun int *mem_target, int *mem_attr)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun u8 port[] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 3 };
146*4882a593Smuzhiyun u8 lane[] = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 0 };
147*4882a593Smuzhiyun u8 target[] = { 4, 4, 4, 4, 8, 8, 8, 8, 4, 8 };
148*4882a593Smuzhiyun u8 attr[] = { 0xe8, 0xd8, 0xb8, 0x78,
149*4882a593Smuzhiyun 0xe8, 0xd8, 0xb8, 0x78,
150*4882a593Smuzhiyun 0xf8, 0xf8 };
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun pcie->port = port[pex_idx];
153*4882a593Smuzhiyun pcie->lane = lane[pex_idx];
154*4882a593Smuzhiyun *mem_target = target[pex_idx];
155*4882a593Smuzhiyun *mem_attr = attr[pex_idx];
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun #endif
158*4882a593Smuzhiyun
mvebu_pex_unit_is_x4(int pex_idx)159*4882a593Smuzhiyun static int mvebu_pex_unit_is_x4(int pex_idx)
160*4882a593Smuzhiyun {
161*4882a593Smuzhiyun int pex_unit = pex_idx < 9 ? pex_idx >> 2 : 3;
162*4882a593Smuzhiyun u32 mask = (0x0f << (pex_unit * 8));
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun return (readl(COMPHY_REFCLK_ALIGNMENT) & mask) == mask;
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun
mvebu_pcie_link_up(struct mvebu_pcie * pcie)167*4882a593Smuzhiyun static inline bool mvebu_pcie_link_up(struct mvebu_pcie *pcie)
168*4882a593Smuzhiyun {
169*4882a593Smuzhiyun u32 val;
170*4882a593Smuzhiyun val = readl(pcie->base + PCIE_STAT_OFF);
171*4882a593Smuzhiyun return !(val & PCIE_STAT_LINK_DOWN);
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun
mvebu_pcie_set_local_bus_nr(struct mvebu_pcie * pcie,int busno)174*4882a593Smuzhiyun static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie *pcie, int busno)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun u32 stat;
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun stat = readl(pcie->base + PCIE_STAT_OFF);
179*4882a593Smuzhiyun stat &= ~PCIE_STAT_BUS;
180*4882a593Smuzhiyun stat |= busno << 8;
181*4882a593Smuzhiyun writel(stat, pcie->base + PCIE_STAT_OFF);
182*4882a593Smuzhiyun }
183*4882a593Smuzhiyun
mvebu_pcie_set_local_dev_nr(struct mvebu_pcie * pcie,int devno)184*4882a593Smuzhiyun static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie *pcie, int devno)
185*4882a593Smuzhiyun {
186*4882a593Smuzhiyun u32 stat;
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun stat = readl(pcie->base + PCIE_STAT_OFF);
189*4882a593Smuzhiyun stat &= ~PCIE_STAT_DEV;
190*4882a593Smuzhiyun stat |= devno << 16;
191*4882a593Smuzhiyun writel(stat, pcie->base + PCIE_STAT_OFF);
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
mvebu_pcie_get_local_bus_nr(struct mvebu_pcie * pcie)194*4882a593Smuzhiyun static int mvebu_pcie_get_local_bus_nr(struct mvebu_pcie *pcie)
195*4882a593Smuzhiyun {
196*4882a593Smuzhiyun u32 stat;
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun stat = readl(pcie->base + PCIE_STAT_OFF);
199*4882a593Smuzhiyun return (stat & PCIE_STAT_BUS) >> 8;
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun
mvebu_pcie_get_local_dev_nr(struct mvebu_pcie * pcie)202*4882a593Smuzhiyun static int mvebu_pcie_get_local_dev_nr(struct mvebu_pcie *pcie)
203*4882a593Smuzhiyun {
204*4882a593Smuzhiyun u32 stat;
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun stat = readl(pcie->base + PCIE_STAT_OFF);
207*4882a593Smuzhiyun return (stat & PCIE_STAT_DEV) >> 16;
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun
/* Recover the mvebu_pcie instance that embeds the given pci_controller. */
static inline struct mvebu_pcie *hose_to_pcie(struct pci_controller *hose)
{
	return container_of(hose, struct mvebu_pcie, hose);
}
214*4882a593Smuzhiyun
/*
 * Read a 32-bit config-space value of @dev at @offset through the
 * indirect ADDR/DATA register pair. Accesses to disallowed devices on
 * the local bus return all-ones and a non-zero status.
 */
static int mvebu_pcie_read_config_dword(struct pci_controller *hose,
					pci_dev_t dev, int offset, u32 *val)
{
	struct mvebu_pcie *pcie = hose_to_pcie(hose);
	int local_bus = PCI_BUS(pcie->dev);
	int local_dev = PCI_DEV(pcie->dev);

	/*
	 * Only allow one other device besides the local one on the
	 * local bus: dev 1 when the root port is dev 0, dev 0 otherwise.
	 */
	if (PCI_BUS(dev) == local_bus && PCI_DEV(dev) != local_dev) {
		int allowed = (local_dev == 0) ? 1 : 0;

		if (PCI_DEV(dev) != allowed) {
			*val = 0xffffffff;
			return 1;
		}
	}

	/* Select the target config address, then read the data register */
	writel(PCIE_CONF_ADDR(dev, offset), pcie->base + PCIE_CONF_ADDR_OFF);
	*val = readl(pcie->base + PCIE_CONF_DATA_OFF);

	return 0;
}
249*4882a593Smuzhiyun
/*
 * Write a 32-bit config-space value of @dev at @offset through the
 * indirect ADDR/DATA register pair. Accesses to disallowed devices on
 * the local bus are rejected with a non-zero status.
 */
static int mvebu_pcie_write_config_dword(struct pci_controller *hose,
					 pci_dev_t dev, int offset, u32 val)
{
	struct mvebu_pcie *pcie = hose_to_pcie(hose);
	int local_bus = PCI_BUS(pcie->dev);
	int local_dev = PCI_DEV(pcie->dev);

	/*
	 * Only allow one other device besides the local one on the
	 * local bus: dev 1 when the root port is dev 0, dev 0 otherwise.
	 */
	if (PCI_BUS(dev) == local_bus && PCI_DEV(dev) != local_dev) {
		int allowed = (local_dev == 0) ? 1 : 0;

		if (PCI_DEV(dev) != allowed)
			return 1;
	}

	/* Select the target config address, then write the data register */
	writel(PCIE_CONF_ADDR(dev, offset), pcie->base + PCIE_CONF_ADDR_OFF);
	writel(val, pcie->base + PCIE_CONF_DATA_OFF);

	return 0;
}
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun /*
281*4882a593Smuzhiyun * Setup PCIE BARs and Address Decode Wins:
282*4882a593Smuzhiyun * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
283*4882a593Smuzhiyun * WIN[0-3] -> DRAM bank[0-3]
284*4882a593Smuzhiyun */
mvebu_pcie_setup_wins(struct mvebu_pcie * pcie)285*4882a593Smuzhiyun static void mvebu_pcie_setup_wins(struct mvebu_pcie *pcie)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun const struct mbus_dram_target_info *dram = mvebu_mbus_dram_info();
288*4882a593Smuzhiyun u32 size;
289*4882a593Smuzhiyun int i;
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun /* First, disable and clear BARs and windows. */
292*4882a593Smuzhiyun for (i = 1; i < 3; i++) {
293*4882a593Smuzhiyun writel(0, pcie->base + PCIE_BAR_CTRL_OFF(i));
294*4882a593Smuzhiyun writel(0, pcie->base + PCIE_BAR_LO_OFF(i));
295*4882a593Smuzhiyun writel(0, pcie->base + PCIE_BAR_HI_OFF(i));
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun for (i = 0; i < 5; i++) {
299*4882a593Smuzhiyun writel(0, pcie->base + PCIE_WIN04_CTRL_OFF(i));
300*4882a593Smuzhiyun writel(0, pcie->base + PCIE_WIN04_BASE_OFF(i));
301*4882a593Smuzhiyun writel(0, pcie->base + PCIE_WIN04_REMAP_OFF(i));
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun
304*4882a593Smuzhiyun writel(0, pcie->base + PCIE_WIN5_CTRL_OFF);
305*4882a593Smuzhiyun writel(0, pcie->base + PCIE_WIN5_BASE_OFF);
306*4882a593Smuzhiyun writel(0, pcie->base + PCIE_WIN5_REMAP_OFF);
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun /* Setup windows for DDR banks. Count total DDR size on the fly. */
309*4882a593Smuzhiyun size = 0;
310*4882a593Smuzhiyun for (i = 0; i < dram->num_cs; i++) {
311*4882a593Smuzhiyun const struct mbus_dram_window *cs = dram->cs + i;
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun writel(cs->base & 0xffff0000,
314*4882a593Smuzhiyun pcie->base + PCIE_WIN04_BASE_OFF(i));
315*4882a593Smuzhiyun writel(0, pcie->base + PCIE_WIN04_REMAP_OFF(i));
316*4882a593Smuzhiyun writel(((cs->size - 1) & 0xffff0000) |
317*4882a593Smuzhiyun (cs->mbus_attr << 8) |
318*4882a593Smuzhiyun (dram->mbus_dram_target_id << 4) | 1,
319*4882a593Smuzhiyun pcie->base + PCIE_WIN04_CTRL_OFF(i));
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun size += cs->size;
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun
324*4882a593Smuzhiyun /* Round up 'size' to the nearest power of two. */
325*4882a593Smuzhiyun if ((size & (size - 1)) != 0)
326*4882a593Smuzhiyun size = 1 << fls(size);
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun /* Setup BAR[1] to all DRAM banks. */
329*4882a593Smuzhiyun writel(dram->cs[0].base | 0xc, pcie->base + PCIE_BAR_LO_OFF(1));
330*4882a593Smuzhiyun writel(0, pcie->base + PCIE_BAR_HI_OFF(1));
331*4882a593Smuzhiyun writel(((size - 1) & 0xffff0000) | 0x1,
332*4882a593Smuzhiyun pcie->base + PCIE_BAR_CTRL_OFF(1));
333*4882a593Smuzhiyun }
334*4882a593Smuzhiyun
/*
 * Probe every PEX interface: skip powered-down ports and ports without
 * link, then map a 32M memory window, set up the address-decode
 * windows, register a U-Boot PCI hose and scan the bus behind it.
 */
void pci_init_board(void)
{
	int mem_target, mem_attr, i;
	int bus = 0;
	u32 reg;
	u32 soc_ctrl = readl(MVEBU_SYSTEM_REG_BASE + 0x4);

	/* Check SoC Control Power State */
	debug("%s: SoC Control %08x, 0en %01lx, 1en %01lx, 2en %01lx\n",
	      __func__, soc_ctrl, SELECT(soc_ctrl, 0), SELECT(soc_ctrl, 1),
	      SELECT(soc_ctrl, 2));

	for (i = 0; i < MAX_PEX; i++) {
		struct mvebu_pcie *pcie = &pcie_bus[i];
		struct pci_controller *hose = &pcie->hose;

		/* Get port number, lane number and memory target / attr */
		mvebu_get_port_lane(pcie, i, &mem_target, &mem_attr);

		/* Don't read at all from pci registers if port power is down */
		if (SELECT(soc_ctrl, pcie->port) == 0) {
			if (pcie->lane == 0)
				debug("%s: skipping port %d\n", __func__, pcie->port);
			continue;
		}

		pcie->base = (void __iomem *)PCIE_BASE(i);

		/* Check link and skip ports that have no link */
		if (!mvebu_pcie_link_up(pcie)) {
			debug("%s: PCIe %d.%d - down\n", __func__,
			      pcie->port, pcie->lane);
			continue;
		}
		debug("%s: PCIe %d.%d - up, base %08x\n", __func__,
		      pcie->port, pcie->lane, (u32)pcie->base);

		/* Read Id info and local bus/dev */
		debug("direct conf read %08x, local bus %d, local dev %d\n",
		      readl(pcie->base), mvebu_pcie_get_local_bus_nr(pcie),
		      mvebu_pcie_get_local_dev_nr(pcie));

		/* The root port of this hose lives at bus:0.0 */
		mvebu_pcie_set_local_bus_nr(pcie, bus);
		mvebu_pcie_set_local_dev_nr(pcie, 0);
		pcie->dev = PCI_BDF(bus, 0, 0);

		/* Carve the next 32M slice out of the shared PCIe MEM space */
		pcie->mem.start = (u32)mvebu_pcie_membase;
		pcie->mem.end = pcie->mem.start + PCIE_MEM_SIZE - 1;
		mvebu_pcie_membase += PCIE_MEM_SIZE;

		if (mvebu_mbus_add_window_by_id(mem_target, mem_attr,
						(phys_addr_t)pcie->mem.start,
						PCIE_MEM_SIZE)) {
			printf("PCIe unable to add mbus window for mem at %08x+%08x\n",
			       (u32)pcie->mem.start, PCIE_MEM_SIZE);
		}

		/* Setup windows and configure host bridge */
		mvebu_pcie_setup_wins(pcie);

		/* Master + slave enable. */
		reg = readl(pcie->base + PCIE_CMD_OFF);
		reg |= PCI_COMMAND_MEMORY;
		reg |= PCI_COMMAND_MASTER;
		reg |= BIT(10); /* disable interrupts */
		writel(reg, pcie->base + PCIE_CMD_OFF);

		/* Setup U-Boot PCI Controller */
		hose->first_busno = 0;
		hose->current_busno = bus;

		/* PCI memory space */
		pci_set_region(hose->regions + 0, pcie->mem.start,
			       pcie->mem.start, PCIE_MEM_SIZE, PCI_REGION_MEM);
		pci_set_region(hose->regions + 1,
			       0, 0,
			       gd->ram_size,
			       PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
		hose->region_count = 2;

		pci_set_ops(hose,
			    pci_hose_read_config_byte_via_dword,
			    pci_hose_read_config_word_via_dword,
			    mvebu_pcie_read_config_dword,
			    pci_hose_write_config_byte_via_dword,
			    pci_hose_write_config_word_via_dword,
			    mvebu_pcie_write_config_dword);
		pci_register_hose(hose);

		hose->last_busno = pci_hose_scan(hose);

		/* Set BAR0 to internal registers */
		writel(SOC_REGS_PHY_BASE, pcie->base + PCIE_BAR_LO_OFF(0));
		writel(0, pcie->base + PCIE_BAR_HI_OFF(0));

		/* The next hose starts after the buses this one claimed */
		bus = hose->last_busno + 1;

		/* need to skip more for X4 links, otherwise scan will hang */
		if (mvebu_soc_family() == MVEBU_SOC_AXP) {
			if (mvebu_pex_unit_is_x4(i))
				i += 3;
		}
	}
}
439