/*
 * Freescale i.MX6 PCI Express Root-Complex driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * Based on upstream Linux kernel driver:
 * pci-imx6.c:		Sean Cross <xobs@kosagi.com>
 * pcie-designware.c:	Jingoo Han <jg1.han@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <pci.h>
#include <asm/arch/clock.h>
#include <asm/arch/iomux.h>
#include <asm/arch/crm_regs.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <linux/sizes.h>
#include <errno.h>
#include <asm/arch/sys_proto.h>

#define PCI_ACCESS_READ  0
#define PCI_ACCESS_WRITE 1

#ifdef CONFIG_MX6SX
#define MX6_DBI_ADDR	0x08ffc000
#define MX6_IO_ADDR	0x08000000
#define MX6_MEM_ADDR	0x08100000
#define MX6_ROOT_ADDR	0x08f00000
#else
#define MX6_DBI_ADDR	0x01ffc000
#define MX6_IO_ADDR	0x01000000
#define MX6_MEM_ADDR	0x01100000
#define MX6_ROOT_ADDR	0x01f00000
#endif
#define MX6_DBI_SIZE	0x4000
#define MX6_IO_SIZE	0x100000
#define MX6_MEM_SIZE	0xe00000
#define MX6_ROOT_SIZE	0xfc000

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP		(1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING	(1 << 29)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_DATA_LOC 0
#define PCIE_PHY_STAT_ACK_LOC 16

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)

#define PCIE_PHY_PUP_REQ		(1 << 7)

/* iATU registers */
#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

/*
 * PHY access functions
 */
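
/*
 * The PHY is reached indirectly through the memory-mapped PCIE_PHY_CTRL/
 * PCIE_PHY_STAT register pair: the address (and later the data) is placed
 * on the DATA bits and latched with the CAP_ADR/CAP_DAT strobes, then the
 * RD/WR strobes trigger the actual access. Each strobe transition must be
 * acknowledged by the PHY through the STAT ack bit, which is what
 * pcie_phy_poll_ack() below waits for (roughly 10 us at most).
 */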
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = readl(dbi_base + PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = readl(dbi_base + PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	writel(0x00, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}
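
/*
 * Illustrative use of the accessor above, mirroring what
 * imx6_pcie_link_up() does further down:
 *
 *	int v;
 *
 *	if (!pcie_phy_read((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, &v))
 *		printf("PHY_RX_OVRD_IN_LO = 0x%04x\n", v);
 */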

static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	writel(var, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack */
	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	writel(0x0, dbi_base + PCIE_PHY_CTRL);

	return 0;
}

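/*
 * Note the return convention: a nonzero value (-EAGAIN here) means "link
 * is up and no longer training", while 0 means "not up yet"; the caller,
 * imx_pcie_link_up(), simply spins with "while (!imx6_pcie_link_up())".
 */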
static int imx6_pcie_link_up(void)
{
	u32 rc, ltssm;
	int rx_valid, temp;

	/* link is debug bit 36, debug register 1 starts at bit 32 */
	rc = readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R1);
	if ((rc & PCIE_PHY_DEBUG_R1_LINK_UP) &&
	    !(rc & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))
		return -EAGAIN;

	/*
	 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
	 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
	 * If (MAC/LTSSM.state == Recovery.RcvrLock) && (PHY/rx_valid == 0),
	 * the transition to gen2 is stuck and we pulse PHY/rx_reset to
	 * recover.
	 */
	pcie_phy_read((void *)MX6_DBI_ADDR, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
	ltssm = readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R0) & 0x3F;

	if (rx_valid & 0x01)
		return 0;

	if (ltssm != 0x0d)
		return 0;

	printf("transition to gen2 is stuck, reset PHY!\n");

	pcie_phy_read((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, &temp);
	temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, temp);

	udelay(3000);

	pcie_phy_read((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, &temp);
	temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write((void *)MX6_DBI_ADDR, PHY_RX_OVRD_IN_LO, temp);

	return 0;
}

/*
 * iATU region setup
 */
static int imx_pcie_regions_setup(void)
{
	/*
	 * i.MX6 defines 16MB in the AXI address map for PCIe.
	 *
	 * That address space, except for the PCIe registers, is
	 * split and defined into different regions by iATU,
	 * with sizes and offsets as follows:
	 *
	 * 0x0100_0000 --- 0x010F_FFFF  1MB IORESOURCE_IO
	 * 0x0110_0000 --- 0x01EF_FFFF 14MB IORESOURCE_MEM
	 * 0x01F0_0000 --- 0x01FF_FFFF  1MB Cfg + Registers
	 */

	/* CMD reg: I/O space, MEM space, and Bus Master Enable */
	setbits_le32(MX6_DBI_ADDR | PCI_COMMAND,
		     PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Set the CLASS_REV of RC CFG header to PCI_CLASS_BRIDGE_PCI */
	setbits_le32(MX6_DBI_ADDR + PCI_CLASS_REVISION,
		     PCI_CLASS_BRIDGE_PCI << 16);

	/* Region #0 is used for Outbound CFG space access. */
	writel(0, MX6_DBI_ADDR + PCIE_ATU_VIEWPORT);

	writel(MX6_ROOT_ADDR, MX6_DBI_ADDR + PCIE_ATU_LOWER_BASE);
	writel(0, MX6_DBI_ADDR + PCIE_ATU_UPPER_BASE);
	writel(MX6_ROOT_ADDR + MX6_ROOT_SIZE, MX6_DBI_ADDR + PCIE_ATU_LIMIT);

	writel(0, MX6_DBI_ADDR + PCIE_ATU_LOWER_TARGET);
	writel(0, MX6_DBI_ADDR + PCIE_ATU_UPPER_TARGET);
	writel(PCIE_ATU_TYPE_CFG0, MX6_DBI_ADDR + PCIE_ATU_CR1);
	writel(PCIE_ATU_ENABLE, MX6_DBI_ADDR + PCIE_ATU_CR2);

	return 0;
}
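
/*
 * Note that region #0 is only given its CFG0 defaults here;
 * get_bus_address() below re-programs the same viewport (type and target)
 * before every configuration-space access.
 */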

/*
 * PCI Express accessors
 */
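/*
 * The legacy U-Boot pci_dev_t packs the BDF as (bus << 16 | dev << 11 |
 * func << 8), so the "d << 8" below lines the fields up with the iATU
 * target layout (bus << 24 | dev << 19 | func << 16) described by the
 * PCIE_ATU_BUS/DEV/FUNC macros above.
 */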
static uint32_t get_bus_address(pci_dev_t d, int where)
{
	uint32_t va_address;

	/* Reconfigure Region #0 */
	writel(0, MX6_DBI_ADDR + PCIE_ATU_VIEWPORT);

	if (PCI_BUS(d) < 2)
		writel(PCIE_ATU_TYPE_CFG0, MX6_DBI_ADDR + PCIE_ATU_CR1);
	else
		writel(PCIE_ATU_TYPE_CFG1, MX6_DBI_ADDR + PCIE_ATU_CR1);

	if (PCI_BUS(d) == 0) {
		va_address = MX6_DBI_ADDR;
	} else {
		writel(d << 8, MX6_DBI_ADDR + PCIE_ATU_LOWER_TARGET);
		va_address = MX6_IO_ADDR + SZ_16M - SZ_1M;
	}

	va_address += (where & ~0x3);

	return va_address;
}

static int imx_pcie_addr_valid(pci_dev_t d)
{
	if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 1))
		return -EINVAL;
	if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0))
		return -EINVAL;
	return 0;
}

/*
 * Replace the original ARM DABT handler with a simple jump-back one.
 *
 * The problem here is that if we have a PCIe bridge attached to this PCIe
 * controller, but no PCIe device is connected to the bridge's downstream
 * port, an attempt to read/write the config space will produce a DABT.
 * This is a behavior of the controller and unfortunately cannot be
 * disabled.
 *
 * To work around the problem, we back up the current DABT handler address
 * and replace it with our own DABT handler, which only bounces right back
 * into the code: the opcode 0xe25ef004 below encodes "subs pc, lr, #4",
 * which returns to the instruction following the aborted access.
 */
static void imx_pcie_fix_dabt_handler(bool set)
{
	extern uint32_t *_data_abort;
	uint32_t *data_abort_addr = (uint32_t *)&_data_abort;

	static const uint32_t data_abort_bounce_handler = 0xe25ef004;
	uint32_t data_abort_bounce_addr = (uint32_t)&data_abort_bounce_handler;

	static uint32_t data_abort_backup;

	if (set) {
		data_abort_backup = *data_abort_addr;
		*data_abort_addr = data_abort_bounce_addr;
	} else {
		*data_abort_addr = data_abort_backup;
	}
}

static int imx_pcie_read_config(struct pci_controller *hose, pci_dev_t d,
				int where, u32 *val)
{
	uint32_t va_address;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret) {
		*val = 0xffffffff;
		return 0;
	}

	va_address = get_bus_address(d, where);

	/*
	 * Read the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller, see
	 * the imx_pcie_fix_dabt_handler() description. Note that presetting
	 * "val" to a valid value is also imperative here: if we did get a
	 * DABT, "val" would otherwise contain a random value.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(0xffffffff, val);
	*val = readl(va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

static int imx_pcie_write_config(struct pci_controller *hose, pci_dev_t d,
			int where, u32 val)
{
	uint32_t va_address = 0;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret)
		return ret;

	va_address = get_bus_address(d, where);

	/*
	 * Write the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller, see
	 * the imx_pcie_fix_dabt_handler() description.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(val, va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

/*
 * Initial bus setup
 */
static int imx6_pcie_assert_core_reset(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	if (is_mx6dqp())
		setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_PCIE_SW_RST);

#if defined(CONFIG_MX6SX)
	struct gpc *gpc_regs = (struct gpc *)GPC_BASE_ADDR;

	/* SSP_EN is not used on MX6SX anymore */
	setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_TEST_POWERDOWN);
	/* Force PCIe PHY reset */
	setbits_le32(&iomuxc_regs->gpr[5], IOMUXC_GPR5_PCIE_BTNRST);
	/* Power up PCIe PHY */
	setbits_le32(&gpc_regs->cntr, PCIE_PHY_PUP_REQ);
#else
	/*
	 * If the bootloader already enabled the link we need some special
	 * handling to get the core back into a state where it is safe to
	 * touch it for configuration.  As there is no dedicated reset signal
	 * wired up for MX6QDL, we need to manually force LTSSM into "detect"
	 * state before completely disabling LTSSM, which is a prerequisite
	 * for core configuration.
	 *
	 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
	 * indication that the bootloader activated the link.
	 */
	if (is_mx6dq()) {
		u32 val, gpr1, gpr12;

		gpr1 = readl(&iomuxc_regs->gpr[1]);
		gpr12 = readl(&iomuxc_regs->gpr[12]);
		if ((gpr1 & IOMUXC_GPR1_PCIE_REF_CLK_EN) &&
		    (gpr12 & IOMUXC_GPR12_PCIE_CTL_2)) {
			val = readl(MX6_DBI_ADDR + PCIE_PL_PFLR);
			val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
			val |= PCIE_PL_PFLR_FORCE_LINK;

			imx_pcie_fix_dabt_handler(true);
			writel(val, MX6_DBI_ADDR + PCIE_PL_PFLR);
			imx_pcie_fix_dabt_handler(false);

			/* Disable LTSSM (write gpr12, not the PFLR value) */
			gpr12 &= ~IOMUXC_GPR12_PCIE_CTL_2;
			writel(gpr12, &iomuxc_regs->gpr[12]);
		}
	}
	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);
#endif

	return 0;
}

static int imx6_pcie_init_phy(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_DEVICE_TYPE_MASK,
			IOMUXC_GPR12_DEVICE_TYPE_RC);
	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_LOS_LEVEL_MASK,
			IOMUXC_GPR12_LOS_LEVEL_9);

#ifdef CONFIG_MX6SX
	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_RX_EQ_MASK,
			IOMUXC_GPR12_RX_EQ_2);
#endif

	writel((0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN1_OFFSET) |
	       (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_3P5DB_OFFSET) |
	       (20 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_6DB_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_FULL_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_LOW_OFFSET),
	       &iomuxc_regs->gpr[8]);

	return 0;
}
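
/*
 * The GPR8 value written above sets the PHY TX de-emphasis and swing.
 * These are reference settings; boards with marginal signal integrity on
 * the PCIe lanes may need to tune them.
 */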

__weak int imx6_pcie_toggle_power(void)
{
#ifdef CONFIG_PCIE_IMX_POWER_GPIO
	gpio_direction_output(CONFIG_PCIE_IMX_POWER_GPIO, 0);
	mdelay(20);
	gpio_set_value(CONFIG_PCIE_IMX_POWER_GPIO, 1);
	mdelay(20);
#endif
	return 0;
}

__weak int imx6_pcie_toggle_reset(void)
{
	/*
	 * See 'PCI EXPRESS BASE SPECIFICATION, REV 3.0, SECTION 6.6.1'
	 * for detailed understanding of the PCIe CR reset logic.
	 *
	 * The PCIe #PERST reset line _MUST_ be connected, otherwise your
	 * design does not conform to the specification. You must wait at
	 * least 20 ms after de-asserting the #PERST so the EP device can
	 * do self-initialisation.
	 *
	 * In case your #PERST pin is connected to a plain GPIO pin of the
	 * CPU, you can define CONFIG_PCIE_IMX_PERST_GPIO in your board's
	 * configuration file and the condition below will handle the rest
	 * of the reset toggling.
	 *
	 * In case your #PERST toggling logic is more complex, for example
	 * connected via a CPLD or suchlike, you can override this function
	 * in your board file and implement the reset logic as needed. You
	 * must not forget to wait at least 20 ms after de-asserting #PERST
	 * in this case either, though.
	 *
	 * In case the #PERST line of the PCIe EP device is not connected
	 * at all, your design is broken and you should fix it. Otherwise
	 * you will observe problems such as the link not coming up after
	 * rebooting back from a running Linux that also uses the PCIe, or
	 * the PCIe link not coming up in Linux at all in the first place,
	 * since the device is left in some non-reset state after being
	 * used in U-Boot.
	 */
#ifdef CONFIG_PCIE_IMX_PERST_GPIO
	gpio_direction_output(CONFIG_PCIE_IMX_PERST_GPIO, 0);
	mdelay(20);
	gpio_set_value(CONFIG_PCIE_IMX_PERST_GPIO, 1);
	mdelay(20);
#else
	puts("WARNING: Make sure the PCIe #PERST line is connected!\n");
#endif
	return 0;
}
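
/*
 * A minimal sketch of such a board override, assuming a hypothetical
 * board_perst_set() helper that drives #PERST through a CPLD or an I2C
 * GPIO expander (the helper name is illustrative, not a real API; the
 * 20 ms waits are the requirement described above):
 *
 *	int imx6_pcie_toggle_reset(void)
 *	{
 *		board_perst_set(0);
 *		mdelay(20);
 *		board_perst_set(1);
 *		mdelay(20);
 *		return 0;
 *	}
 */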

static int imx6_pcie_deassert_core_reset(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	imx6_pcie_toggle_power();

	enable_pcie_clock();

	if (is_mx6dqp())
		clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_PCIE_SW_RST);

	/*
	 * Wait for the clock to settle a bit; when the clocks are sourced
	 * from the CPU, we need about 30 ms to settle.
	 */
	mdelay(50);

#if defined(CONFIG_MX6SX)
	/* SSP_EN is not used on MX6SX anymore */
	clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_TEST_POWERDOWN);
	/* Clear PCIe PHY reset bit */
	clrbits_le32(&iomuxc_regs->gpr[5], IOMUXC_GPR5_PCIE_BTNRST);
#else
	/* Enable PCIe */
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);
#endif

	imx6_pcie_toggle_reset();

	return 0;
}

static int imx_pcie_link_up(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;
	uint32_t tmp;
	int count = 0;

	imx6_pcie_assert_core_reset();
	imx6_pcie_init_phy();
	imx6_pcie_deassert_core_reset();

	imx_pcie_regions_setup();

	/*
	 * FIXME: Force the PCIe RC to Gen1 operation
	 * The RC must be forced into Gen1 mode before bringing the link
	 * up, otherwise no downstream devices are detected. After the
	 * link is up, a managed Gen1->Gen2 transition can be initiated.
	 * (Offset 0x7c is the Link Capabilities register; its Max Link
	 * Speed field in bits [3:0] is set to Gen1 here.)
	 */
	tmp = readl(MX6_DBI_ADDR + 0x7c);
	tmp &= ~0xf;
	tmp |= 0x1;
	writel(tmp, MX6_DBI_ADDR + 0x7c);

	/* LTSSM enable, starting link. */
	setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	while (!imx6_pcie_link_up()) {
		udelay(10);
		count++;
		if (count >= 4000) {
#ifdef CONFIG_PCI_SCAN_SHOW
			puts("PCI:   pcie phy link never came up\n");
#endif
			debug("DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
			      readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R0),
			      readl(MX6_DBI_ADDR + PCIE_PHY_DEBUG_R1));
			return -EINVAL;
		}
	}

	return 0;
}

void imx_pcie_init(void)
{
	/* Static instance of the controller. */
	static struct pci_controller	pcc;
	struct pci_controller		*hose = &pcc;
	int ret;

	memset(&pcc, 0, sizeof(pcc));

	/* PCI I/O space */
	pci_set_region(&hose->regions[0],
		       MX6_IO_ADDR, MX6_IO_ADDR,
		       MX6_IO_SIZE, PCI_REGION_IO);

	/* PCI memory space */
	pci_set_region(&hose->regions[1],
		       MX6_MEM_ADDR, MX6_MEM_ADDR,
		       MX6_MEM_SIZE, PCI_REGION_MEM);

	/* System memory space */
	pci_set_region(&hose->regions[2],
		       MMDC0_ARB_BASE_ADDR, MMDC0_ARB_BASE_ADDR,
		       0xefffffff, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	hose->region_count = 3;

	pci_set_ops(hose,
		    pci_hose_read_config_byte_via_dword,
		    pci_hose_read_config_word_via_dword,
		    imx_pcie_read_config,
		    pci_hose_write_config_byte_via_dword,
		    pci_hose_write_config_word_via_dword,
		    imx_pcie_write_config);

	/* Start the controller. */
	ret = imx_pcie_link_up();

	if (!ret) {
		pci_register_hose(hose);
		hose->last_busno = pci_hose_scan(hose);
	}
}

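/*
 * Put the core back into reset, so that an OS booted afterwards finds the
 * controller quiescent rather than in the half-configured state left
 * behind by U-Boot (see the #PERST discussion in imx6_pcie_toggle_reset()).
 */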
void imx_pcie_remove(void)
{
	imx6_pcie_assert_core_reset();
}

/* Probe function. */
void pci_init_board(void)
{
	imx_pcie_init();
}
698