/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007, 2008, 2009, 2010, 2011 Cavium Networks
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pciercx-defs.h>
#include <asm/octeon/cvmx-pescx-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pemx-defs.h>
#include <asm/octeon/cvmx-dpi-defs.h>
#include <asm/octeon/cvmx-sli-defs.h>
#include <asm/octeon/cvmx-sriox-defs.h>
#include <asm/octeon/cvmx-helper-errata.h>
#include <asm/octeon/pci-octeon.h>

#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
#define MPS_CN5XXX  0 /* 128 byte Max Payload Size (Limit of most PCs) */
#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
#define MPS_CN6XXX  0 /* 128 byte Max Payload Size (Limit of most PCs) */

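/*
 * These values use the standard PCIe Device Control register encoding,
 * where size = 128 << value: an encoding of 0 selects 128 bytes and
 * MRRS_CN6XXX = 3 selects 128 << 3 = 1024 bytes.
 */
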
/* Module parameter to disable PCI probing */
static int pcie_disable;
module_param(pcie_disable, int, S_IRUGO);
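/*
 * Since this code is built in, the parameter should be settable on the
 * kernel command line, e.g. "pcie_octeon.pcie_disable=1" (the exact
 * prefix follows KBUILD_MODNAME; boot parameter matching treats '-'
 * and '_' as equivalent).
 */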

static int enable_pcie_14459_war;
static int enable_pcie_bus_num_war[2];

union cvmx_pcie_address {
	uint64_t u64;
	struct {
		uint64_t upper:2;	/* Normally 2 for XKPHYS */
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t io:1;	/* 1 for IO space access */
		uint64_t did:5; /* PCIe DID = 3 */
		uint64_t subdid:3;	/* PCIe SubDID = 1 */
		uint64_t reserved_36_39:4;	/* Must be zero */
		uint64_t es:2;	/* Endian swap = 1 */
		uint64_t port:2;	/* PCIe port 0,1 */
		uint64_t reserved_29_31:3;	/* Must be zero */
		/*
		 * Selects the type of the configuration request (0 = type 0,
		 * 1 = type 1).
		 */
		uint64_t ty:1;
		/* Target bus number sent in the ID in the request. */
		uint64_t bus:8;
		/*
		 * Target device number sent in the ID in the
		 * request. Note that Dev must be zero for type 0
		 * configuration requests.
		 */
		uint64_t dev:5;
		/* Target function number sent in the ID in the request. */
		uint64_t func:3;
		/*
		 * Selects a register in the configuration space of
		 * the target.
		 */
		uint64_t reg:12;
	} config;
	struct {
		uint64_t upper:2;	/* Normally 2 for XKPHYS */
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t io:1;	/* 1 for IO space access */
		uint64_t did:5; /* PCIe DID = 3 */
		uint64_t subdid:3;	/* PCIe SubDID = 2 */
		uint64_t reserved_36_39:4;	/* Must be zero */
		uint64_t es:2;	/* Endian swap = 1 */
		uint64_t port:2;	/* PCIe port 0,1 */
		uint64_t address:32;	/* PCIe IO address */
	} io;
	struct {
		uint64_t upper:2;	/* Normally 2 for XKPHYS */
		uint64_t reserved_49_61:13;	/* Must be zero */
		uint64_t io:1;	/* 1 for IO space access */
		uint64_t did:5; /* PCIe DID = 3 */
		uint64_t subdid:3;	/* PCIe SubDID = 3-6 */
		uint64_t reserved_36_39:4;	/* Must be zero */
		uint64_t address:36;	/* PCIe Mem address */
	} mem;
};
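
/*
 * On the usual big-endian Octeon configuration these bitfields are
 * allocated from the most significant bit down: 'upper' is bits
 * <63:62>, 'io' is bit <48>, 'did' is <47:43>, and so on. As a worked
 * example, a type 0 config request on port 0 to bus 1, device 0,
 * function 0, register 0 (upper = 2, io = 1, did = 3, subdid = 1,
 * es = 1) encodes as 0x8001190400100000.
 */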

static int cvmx_pcie_rc_initialize(int pcie_port);

/**
 * Return the Core virtual base address for PCIe IO access. IOs are
 * read/written as an offset from this address.
 *
 * @pcie_port: PCIe port the IO is for
 *
 * Returns 64bit Octeon IO base address for read/write
 */
static inline uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
{
	union cvmx_pcie_address pcie_addr;
	pcie_addr.u64 = 0;
	pcie_addr.io.upper = 0;
	pcie_addr.io.io = 1;
	pcie_addr.io.did = 3;
	pcie_addr.io.subdid = 2;
	pcie_addr.io.es = 1;
	pcie_addr.io.port = pcie_port;
	return pcie_addr.u64;
}
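
/*
 * With the encoding above this works out to 0x11a0400000000 for port 0
 * (io = 1 -> bit <48>, did = 3 -> bits <47:43>, subdid = 2 -> bits
 * <42:40>, es = 1 -> bits <35:34>). 'upper' is left at zero here, so
 * callers add the XKPHYS bits themselves (e.g. OR in 1ull << 63) when
 * they need an address the core can dereference.
 */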

/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @pcie_port: PCIe port the IO is for
 *
 * Returns Size of the IO window
 */
static inline uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
	return 1ull << 32;
}

/**
 * Return the Core virtual base address for PCIe MEM access. Memory is
 * read/written as an offset from this address.
 *
 * @pcie_port: PCIe port the IO is for
 *
 * Returns 64bit Octeon IO base address for read/write
 */
static inline uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
{
	union cvmx_pcie_address pcie_addr;
	pcie_addr.u64 = 0;
	pcie_addr.mem.upper = 0;
	pcie_addr.mem.io = 1;
	pcie_addr.mem.did = 3;
	pcie_addr.mem.subdid = 3 + pcie_port;
	return pcie_addr.u64;
}
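
/*
 * Analogously this is 0x11b0000000000 for port 0 and 0x11c0000000000
 * for port 1 (subdid = 3 + port occupies bits <42:40>); the low 36
 * bits are the offset into the 2^36 byte memory window reported by
 * cvmx_pcie_get_mem_size() below.
 */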

/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @pcie_port: PCIe port the IO is for
 *
 * Returns Size of the Mem window
 */
static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
	return 1ull << 36;
}

/**
 * Read a PCIe config space register indirectly. This is used for
 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
 *
 * @pcie_port:	PCIe port to read from
 * @cfg_offset: Address to read
 *
 * Returns Value read
 */
static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
{
	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
		union cvmx_pescx_cfg_rd pescx_cfg_rd;
		pescx_cfg_rd.u64 = 0;
		pescx_cfg_rd.s.addr = cfg_offset;
		cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), pescx_cfg_rd.u64);
		pescx_cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
		return pescx_cfg_rd.s.data;
	} else {
		union cvmx_pemx_cfg_rd pemx_cfg_rd;
		pemx_cfg_rd.u64 = 0;
		pemx_cfg_rd.s.addr = cfg_offset;
		cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), pemx_cfg_rd.u64);
		pemx_cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
		return pemx_cfg_rd.s.data;
	}
}
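
/*
 * The handshake is the same on both register blocks: write the target
 * offset to the CFG_RD CSR, then read the CSR back to latch the data.
 * Typical use mirrors the rest of this file, e.g. to sample the link
 * status register:
 *
 *	union cvmx_pciercx_cfg032 cfg032;
 *	cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port,
 *					 CVMX_PCIERCX_CFG032(pcie_port));
 */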

/**
 * Write a PCIe config space register indirectly. This is used for
 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
 *
 * @pcie_port:	PCIe port to write to
 * @cfg_offset: Address to write
 * @val:	Value to write
 */
static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
				 uint32_t val)
{
	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
		union cvmx_pescx_cfg_wr pescx_cfg_wr;
		pescx_cfg_wr.u64 = 0;
		pescx_cfg_wr.s.addr = cfg_offset;
		pescx_cfg_wr.s.data = val;
		cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), pescx_cfg_wr.u64);
	} else {
		union cvmx_pemx_cfg_wr pemx_cfg_wr;
		pemx_cfg_wr.u64 = 0;
		pemx_cfg_wr.s.addr = cfg_offset;
		pemx_cfg_wr.s.data = val;
		cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), pemx_cfg_wr.u64);
	}
}

/**
 * Build a PCIe config space request address for a device
 *
 * @pcie_port: PCIe port to access
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 *
 * Returns 64bit Octeon IO address
 */
static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
						     int dev, int fn, int reg)
{
	union cvmx_pcie_address pcie_addr;
	union cvmx_pciercx_cfg006 pciercx_cfg006;

	pciercx_cfg006.u32 =
	    cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
	if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
		return 0;

	pcie_addr.u64 = 0;
	pcie_addr.config.upper = 2;
	pcie_addr.config.io = 1;
	pcie_addr.config.did = 3;
	pcie_addr.config.subdid = 1;
	pcie_addr.config.es = 1;
	pcie_addr.config.port = pcie_port;
	pcie_addr.config.ty = (bus > pciercx_cfg006.s.pbnum);
	pcie_addr.config.bus = bus;
	pcie_addr.config.dev = dev;
	pcie_addr.config.func = fn;
	pcie_addr.config.reg = reg;
	return pcie_addr.u64;
}
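
/*
 * Requests on the primary bus (bus <= PBNUM, which this file programs
 * to 1) go out as type 0 and may only target device 0, the directly
 * attached device; anything behind a bridge gets a type 1 request
 * (ty = 1) that the bridge converts as it forwards. Returning 0 for
 * the illegal bus/device combinations lets the accessors below answer
 * all-ones, the conventional "no device" value.
 */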

/**
 * Read 8bits from a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 *
 * Returns Result of the read
 */
static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev,
				      int fn, int reg)
{
	uint64_t address =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
	if (address)
		return cvmx_read64_uint8(address);
	else
		return 0xff;
}

/**
 * Read 16bits from a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 *
 * Returns Result of the read
 */
static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev,
					int fn, int reg)
{
	uint64_t address =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
	if (address)
		return le16_to_cpu(cvmx_read64_uint16(address));
	else
		return 0xffff;
}

/**
 * Read 32bits from a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 *
 * Returns Result of the read
 */
static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev,
					int fn, int reg)
{
	uint64_t address =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
	if (address)
		return le32_to_cpu(cvmx_read64_uint32(address));
	else
		return 0xffffffff;
}
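
/*
 * A minimal sketch of how these are used (the bus/device numbers are
 * only an illustration): probe for a device at 1:0.0 on port 0 by
 * reading the 32-bit vendor/device ID word at config offset 0.
 *
 *	uint32_t id = cvmx_pcie_config_read32(0, 1, 0, 0, 0);
 *	if (id != 0xffffffff)
 *		cvmx_dprintf("found device %08x\n", id);
 */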

/**
 * Write 8bits to a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 * @val:       Value to write
 */
static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn,
				    int reg, uint8_t val)
{
	uint64_t address =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
	if (address)
		cvmx_write64_uint8(address, val);
}

/**
 * Write 16bits to a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 * @val:       Value to write
 */
static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn,
				     int reg, uint16_t val)
{
	uint64_t address =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
	if (address)
		cvmx_write64_uint16(address, cpu_to_le16(val));
}

/**
 * Write 32bits to a Device's config space
 *
 * @pcie_port: PCIe port the device is on
 * @bus:       Sub bus
 * @dev:       Device ID
 * @fn:	       Device sub function
 * @reg:       Register to access
 * @val:       Value to write
 */
static void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn,
				     int reg, uint32_t val)
{
	uint64_t address =
	    __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
	if (address)
		cvmx_write64_uint32(address, cpu_to_le32(val));
}
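
/*
 * These writes pair naturally with the reads above for read-modify-
 * write sequences. An illustrative (not from the original source)
 * example that sets the Bus Master bit in the standard PCI command
 * register (offset 4, bit 2) of the device at 1:0.0 on port 0:
 *
 *	uint32_t cmd = cvmx_pcie_config_read32(0, 1, 0, 0, 4);
 *	cvmx_pcie_config_write32(0, 1, 0, 0, 4, cmd | 0x4);
 */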

/**
 * Initialize the RC config space CSRs
 *
 * @pcie_port: PCIe port to initialize
 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
	union cvmx_pciercx_cfg030 pciercx_cfg030;
	union cvmx_pciercx_cfg070 pciercx_cfg070;
	union cvmx_pciercx_cfg001 pciercx_cfg001;
	union cvmx_pciercx_cfg032 pciercx_cfg032;
	union cvmx_pciercx_cfg006 pciercx_cfg006;
	union cvmx_pciercx_cfg008 pciercx_cfg008;
	union cvmx_pciercx_cfg009 pciercx_cfg009;
	union cvmx_pciercx_cfg010 pciercx_cfg010;
	union cvmx_pciercx_cfg011 pciercx_cfg011;
	union cvmx_pciercx_cfg035 pciercx_cfg035;
	union cvmx_pciercx_cfg075 pciercx_cfg075;
	union cvmx_pciercx_cfg034 pciercx_cfg034;

	/* Max Payload Size (PCIE*_CFG030[MPS]) */
	/* Max Read Request Size (PCIE*_CFG030[MRRS]) */
	/* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN]) */
	/* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */

	pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		pciercx_cfg030.s.mps = MPS_CN5XXX;
		pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
	} else {
		pciercx_cfg030.s.mps = MPS_CN6XXX;
		pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
	}
	/*
	 * Enable relaxed order processing. This will allow devices to
	 * affect read response ordering.
	 */
	pciercx_cfg030.s.ro_en = 1;
	/* Enable no snoop processing. Not used by Octeon */
	pciercx_cfg030.s.ns_en = 1;
	/* Correctable error reporting enable. */
	pciercx_cfg030.s.ce_en = 1;
	/* Non-fatal error reporting enable. */
	pciercx_cfg030.s.nfe_en = 1;
	/* Fatal error reporting enable. */
	pciercx_cfg030.s.fe_en = 1;
	/* Unsupported request reporting enable. */
	pciercx_cfg030.s.ur_en = 1;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);


	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
		union cvmx_npei_ctl_status2 npei_ctl_status2;
		/*
		 * Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match
		 * PCIE*_CFG030[MPS].  Max Read Request Size
		 * (NPEI_CTL_STATUS2[MRRS]) must not exceed
		 * PCIE*_CFG030[MRRS]
		 */
		npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
		/* Max payload size = 128 bytes for best Octeon DMA performance */
		npei_ctl_status2.s.mps = MPS_CN5XXX;
		/* Max read request size = 128 bytes for best Octeon DMA performance */
		npei_ctl_status2.s.mrrs = MRRS_CN5XXX;
		if (pcie_port)
			npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
		else
			npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */

		cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
	} else {
		/*
		 * Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match
		 * PCIE*_CFG030[MPS].  Max Read Request Size
		 * (DPI_SLI_PRTX_CFG[MRRS]) must not exceed
		 * PCIE*_CFG030[MRRS].
		 */
		union cvmx_dpi_sli_prtx_cfg prt_cfg;
		union cvmx_sli_s2m_portx_ctl sli_s2m_portx_ctl;
		prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
		prt_cfg.s.mps = MPS_CN6XXX;
		prt_cfg.s.mrrs = MRRS_CN6XXX;
		/* Max outstanding load request. */
		prt_cfg.s.molr = 32;
		cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

		sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
		sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
		cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
	}

	/* ECRC Generation (PCIE*_CFG070[GE,CE]) */
	pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
	pciercx_cfg070.s.ge = 1;	/* ECRC generation enable. */
	pciercx_cfg070.s.ce = 1;	/* ECRC check enable. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);

	/*
	 * Access Enables (PCIE*_CFG001[MSAE,ME])
	 * ME and MSAE should always be set.
	 * Interrupt Disable (PCIE*_CFG001[I_DIS])
	 * System Error Message Enable (PCIE*_CFG001[SEE])
	 */
	pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
	pciercx_cfg001.s.msae = 1;	/* Memory space enable. */
	pciercx_cfg001.s.me = 1;	/* Bus master enable. */
	pciercx_cfg001.s.i_dis = 1;	/* INTx assertion disable. */
	pciercx_cfg001.s.see = 1;	/* SERR# enable */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);

	/* Advanced Error Recovery Message Enables */
	/* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
	/* Use CVMX_PCIERCX_CFG067 hardware default */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);


	/* Active State Power Management (PCIE*_CFG032[ASLPC]) */
	pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
	pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);

	/*
	 * Link Width Mode (PCIERCn_CFG452[LME]) - Set during
	 * cvmx_pcie_rc_initialize_link()
	 *
	 * Primary Bus Number (PCIERCn_CFG006[PBNUM])
	 *
	 * We set the primary bus number to 1 so IDT bridges are
	 * happy. They don't like zero.
	 */
	pciercx_cfg006.u32 = 0;
	pciercx_cfg006.s.pbnum = 1;
	pciercx_cfg006.s.sbnum = 1;
	pciercx_cfg006.s.subbnum = 1;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);


	/*
	 * Memory-mapped I/O BAR (PCIERCn_CFG008)
	 * Most applications should disable the memory-mapped I/O BAR by
	 * setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR]
	 */
	pciercx_cfg008.u32 = 0;
	pciercx_cfg008.s.mb_addr = 0x100;
	pciercx_cfg008.s.ml_addr = 0;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);


	/*
	 * Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011)
	 * Most applications should disable the prefetchable BAR by setting
	 * PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] <
	 * PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE]
	 */
	pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
	pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
	pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
	pciercx_cfg009.s.lmem_base = 0x100;
	pciercx_cfg009.s.lmem_limit = 0;
	pciercx_cfg010.s.umem_base = 0x100;
	pciercx_cfg011.s.umem_limit = 0;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);

	/*
	 * System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE])
	 * PME Interrupt Enables (PCIERCn_CFG035[PMEIE])
	 */
	pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
	pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
	pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
	pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
	pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);

	/*
	 * Advanced Error Recovery Interrupt Enables
	 * (PCIERCn_CFG075[CERE,NFERE,FERE])
	 */
	pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
	pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
	pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
	pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);

	/*
	 * HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN],
	 * PCIERCn_CFG034[DLLS_EN,CCINT_EN])
	 */
	pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
	pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
	pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
	pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
}

/**
 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @pcie_port: PCIe port to initialize
 *
 * Returns Zero on success
 */
static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
	uint64_t start_cycle;
	union cvmx_pescx_ctl_status pescx_ctl_status;
	union cvmx_pciercx_cfg452 pciercx_cfg452;
	union cvmx_pciercx_cfg032 pciercx_cfg032;
	union cvmx_pciercx_cfg448 pciercx_cfg448;

	/* Set the lane width */
	pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
	if (pescx_ctl_status.s.qlm_cfg == 0)
		/* We're in 8 lane (56XX) or 4 lane (54XX) mode */
		pciercx_cfg452.s.lme = 0xf;
	else
		/* We're in 4 lane (56XX) or 2 lane (52XX) mode */
		pciercx_cfg452.s.lme = 0x7;
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

	/*
	 * CN52XX pass 1.x has an errata where length mismatches on UR
	 * responses can cause bus errors on 64bit memory
	 * reads. Turning off length error checking fixes this.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		union cvmx_pciercx_cfg455 pciercx_cfg455;
		pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
		pciercx_cfg455.s.m_cpl_len_err = 1;
		cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
	}

	/* Lane swap needs to be manually enabled for CN52XX */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1)) {
		pescx_ctl_status.s.lane_swp = 1;
		cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);
	}

	/* Bring up the link */
	pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
	pescx_ctl_status.s.lnk_enb = 1;
	cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

	/*
	 * CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to
	 * be disabled.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
		__cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

	/* Wait for the link to come up */
	start_cycle = cvmx_get_cycle();
	do {
		if (cvmx_get_cycle() - start_cycle > 2 * octeon_get_clock_rate()) {
			cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
			return -1;
		}
		__delay(10000);
		pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
	} while (pciercx_cfg032.s.dlla == 0);

	/* Clear all pending errors */
	cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));

	/*
	 * Update the Replay Time Limit. Empirically, some PCIe
	 * devices take a little longer to respond than expected under
	 * load. As a workaround for this we configure the Replay Time
	 * Limit to the value expected for a 512 byte MPS instead of
	 * our actual 256 byte MPS. The numbers below are directly
	 * from the PCIe spec table 3-4.
	 */
	pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
	switch (pciercx_cfg032.s.nlw) {
	case 1:		/* 1 lane */
		pciercx_cfg448.s.rtl = 1677;
		break;
	case 2:		/* 2 lanes */
		pciercx_cfg448.s.rtl = 867;
		break;
	case 4:		/* 4 lanes */
		pciercx_cfg448.s.rtl = 462;
		break;
	case 8:		/* 8 lanes */
		pciercx_cfg448.s.rtl = 258;
		break;
	}
	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

	return 0;
}

static void __cvmx_increment_ba(union cvmx_sli_mem_access_subidx *pmas)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		pmas->cn68xx.ba++;
	else
		pmas->s.ba++;
}

/**
 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't
 * enumerate the bus.
 *
 * @pcie_port: PCIe port to initialize
 *
 * Returns Zero on success
 */
static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
{
	int i;
	int base;
	u64 addr_swizzle;
	union cvmx_ciu_soft_prst ciu_soft_prst;
	union cvmx_pescx_bist_status pescx_bist_status;
	union cvmx_pescx_bist_status2 pescx_bist_status2;
	union cvmx_npei_ctl_status npei_ctl_status;
	union cvmx_npei_mem_access_ctl npei_mem_access_ctl;
	union cvmx_npei_mem_access_subidx mem_access_subid;
	union cvmx_npei_dbg_data npei_dbg_data;
	union cvmx_pescx_ctl_status2 pescx_ctl_status2;
	union cvmx_pciercx_cfg032 pciercx_cfg032;
	union cvmx_npei_bar1_indexx bar1_index;

retry:
	/*
	 * Make sure we aren't trying to setup a target mode interface
	 * in host mode.
	 */
	npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
	if ((pcie_port == 0) && !npei_ctl_status.s.host_mode) {
		cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
		return -1;
	}

	/*
	 * Make sure a CN52XX isn't trying to bring up port 1 when it
	 * is disabled.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
		if ((pcie_port == 1) && npei_dbg_data.cn52xx.qlm0_link_width) {
			cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
			return -1;
		}
	}

	/*
	 * PCIe switch arbitration mode. '0' == fixed priority NPEI,
	 * PCIe0, then PCIe1. '1' == round robin.
	 */
	npei_ctl_status.s.arb = 1;
	/* Allow up to 0x20 config retries */
	npei_ctl_status.s.cfg_rtry = 0x20;
	/*
	 * CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS
	 * don't reset.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		npei_ctl_status.s.p0_ntags = 0x20;
		npei_ctl_status.s.p1_ntags = 0x20;
	}
	cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);

	/* Bring the PCIe out of reset */
	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) {
		/*
		 * The EBH5200 board swapped the PCIe reset lines on
		 * the board. As a workaround for this bug, we bring
		 * both PCIe ports out of reset at the same time
		 * instead of on separate calls. So for port 0, we
		 * bring both out of reset and do nothing on port 1
		 */
		if (pcie_port == 0) {
			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
			/*
			 * After a chip reset the PCIe will also be in
			 * reset. If it isn't, most likely someone is
			 * trying to init it again without a proper
			 * PCIe reset.
			 */
			if (ciu_soft_prst.s.soft_prst == 0) {
				/* Reset the ports */
				ciu_soft_prst.s.soft_prst = 1;
				cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
				ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
				ciu_soft_prst.s.soft_prst = 1;
				cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
				/* Wait until pcie resets the ports. */
				udelay(2000);
			}
			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
			ciu_soft_prst.s.soft_prst = 0;
			cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
			ciu_soft_prst.s.soft_prst = 0;
			cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
		}
	} else {
		/*
		 * The normal case: The PCIe ports are completely
		 * separate and can be brought out of reset
		 * independently.
		 */
		if (pcie_port)
			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
		else
			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
		/*
		 * After a chip reset the PCIe will also be in
		 * reset. If it isn't, most likely someone is trying
		 * to init it again without a proper PCIe reset.
		 */
		if (ciu_soft_prst.s.soft_prst == 0) {
			/* Reset the port */
			ciu_soft_prst.s.soft_prst = 1;
			if (pcie_port)
				cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
			else
				cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
			/* Wait until pcie resets the ports. */
			udelay(2000);
		}
		if (pcie_port) {
			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
			ciu_soft_prst.s.soft_prst = 0;
			cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
		} else {
			ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
			ciu_soft_prst.s.soft_prst = 0;
			cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
		}
	}

	/*
	 * Wait for PCIe reset to complete. Due to errata PCIE-700, we
	 * don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a
	 * fixed number of cycles.
	 */
	__delay(400000);

	/*
	 * PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of
	 * CN56XX and CN52XX, so we only probe it on newer chips
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Clear PCLK_RUN so we can check if the clock is running */
		pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
		pescx_ctl_status2.s.pclk_run = 1;
		cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
		/* Now that we cleared PCLK_RUN, wait for it to be set
		 * again telling us the clock is running
		 */
		if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
					  union cvmx_pescx_ctl_status2, pclk_run, ==, 1, 10000)) {
			cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
			return -1;
		}
	}

	/*
	 * Check and make sure PCIe came out of reset. If it doesn't
	 * the board probably hasn't wired the clocks up and the
	 * interface should be skipped.
	 */
	pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
	if (pescx_ctl_status2.s.pcierst) {
		cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
		return -1;
	}

	/*
	 * Check BIST2 status. If any bits are set skip this
	 * interface. This is an attempt to catch PCIE-813 on pass 1
	 * parts.
	 */
	pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
	if (pescx_bist_status2.u64) {
		cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n",
			     pcie_port);
		return -1;
	}

	/* Check BIST status */
	pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
	if (pescx_bist_status.u64)
		cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n",
			     pcie_port, CAST64(pescx_bist_status.u64));

	/* Initialize the config space CSRs */
	__cvmx_pcie_rc_initialize_config_space(pcie_port);

	/* Bring the link up */
	if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port)) {
		cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n",
			     pcie_port);
		return -1;
	}

	/* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
	npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
	npei_mem_access_ctl.s.max_word = 0;	/* Allow 16 words to combine */
	npei_mem_access_ctl.s.timer = 127;	/* Wait up to 127 cycles for more data */
	cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);

	/* Setup Mem access SubDIDs */
	mem_access_subid.u64 = 0;
	mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
	mem_access_subid.s.nmerge = 1;	/* Due to an errata on pass 1 chips, no merging is allowed. */
	mem_access_subid.s.esr = 1;	/* Endian-swap for Reads. */
	mem_access_subid.s.esw = 1;	/* Endian-swap for Writes. */
	mem_access_subid.s.nsr = 0;	/* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
	mem_access_subid.s.nsw = 0;	/* Enable Snoop for Writes. */
	mem_access_subid.s.ror = 0;	/* Disable Relaxed Ordering for Reads. */
	mem_access_subid.s.row = 0;	/* Disable Relaxed Ordering for Writes. */
	mem_access_subid.s.ba = 0;	/* PCIe Address Bits <63:34>. */

	/*
	 * Setup mem access 12-15 for port 0, 16-19 for port 1,
	 * supplying 36 bits of address space.
	 */
	for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
		cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
		mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
	}
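
	/*
	 * Since BA supplies PCIe address bits <63:34>, each SubDID above
	 * decodes a 2^34 byte (16GB) window; the four consecutive windows
	 * cover the 2^36 bytes advertised by cvmx_pcie_get_mem_size().
	 */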

	/*
	 * Disable the peer to peer forwarding register. This must be
	 * set up by the OS after it enumerates the bus and assigns
	 * addresses to the PCIe busses.
	 */
	for (i = 0; i < 4; i++) {
		cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
		cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
	}

	/* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
	cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);

	/* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
	cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);

	bar1_index.u32 = 0;
	bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
	bar1_index.s.ca = 1;	   /* Not Cached */
	bar1_index.s.end_swp = 1;  /* Endian Swap mode */
	bar1_index.s.addr_v = 1;   /* Valid entry */

	base = pcie_port ? 16 : 0;

	/* Big endian swizzle for 32-bit PEXP_NCB register. */
#ifdef __MIPSEB__
	addr_swizzle = 4;
#else
	addr_swizzle = 0;
#endif
	for (i = 0; i < 16; i++) {
		cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle),
				    bar1_index.u32);
		base++;
		/* 256MB / 16 >> 22 == 4 */
		bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
	}
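
	/*
	 * The arithmetic above: BAR1 is 256MB backed by 16 index entries
	 * of 16MB each, and ADDR_IDX is in units of 2^22 bytes (4MB), so
	 * each successive entry advances by 16MB / 4MB = 4.
	 */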

	/*
	 * Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take
	 * precedence where they overlap. It also overlaps with the
	 * device addresses, so make sure the peer to peer forwarding
	 * is set right.
	 */
	cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);

	/*
	 * Setup BAR2 attributes
	 *
	 * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
	 * - PTLP_RO,CTLP_RO should normally be set (except for debug).
	 * - WAIT_COM=0 will likely work for all applications.
	 *
	 * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]).
	 */
	if (pcie_port) {
		union cvmx_npei_ctl_port1 npei_ctl_port;
		npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
		npei_ctl_port.s.bar2_enb = 1;
		npei_ctl_port.s.bar2_esx = 1;
		npei_ctl_port.s.bar2_cax = 0;
		npei_ctl_port.s.ptlp_ro = 1;
		npei_ctl_port.s.ctlp_ro = 1;
		npei_ctl_port.s.wait_com = 0;
		npei_ctl_port.s.waitl_com = 0;
		cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
	} else {
		union cvmx_npei_ctl_port0 npei_ctl_port;
		npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
		npei_ctl_port.s.bar2_enb = 1;
		npei_ctl_port.s.bar2_esx = 1;
		npei_ctl_port.s.bar2_cax = 0;
		npei_ctl_port.s.ptlp_ro = 1;
		npei_ctl_port.s.ctlp_ro = 1;
		npei_ctl_port.s.wait_com = 0;
		npei_ctl_port.s.waitl_com = 0;
		cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
	}

	/*
	 * Both pass 1 and pass 2 of CN52XX and CN56XX have an errata
	 * that causes TLP ordering to not be preserved after multiple
	 * PCIe port resets. This code detects this fault and corrects
	 * it by aligning the TLP counters properly. Another link
	 * reset is then performed. See PCIE-13340
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		union cvmx_npei_dbg_data dbg_data;
		int old_in_fif_p_count;
		int in_fif_p_count;
		int out_p_count;
		int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
		int i;

		/*
		 * Choose a write address of 1MB. It should be
		 * harmless as none of the BARs have been set up yet.
		 */
		uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);
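
		/*
		 * Note: OR-ing in 1ull << 63 sets the top bit of the
		 * two-bit 'upper' field, giving upper = 2 and turning
		 * the raw bus address (returned with upper = 0) into
		 * an XKPHYS address the core can issue directly.
		 */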

		/*
		 * Make sure at least in_p_offset writes have been executed
		 * before we try to read in_fif_p_count.
		 */
1016*4882a593Smuzhiyun 		i = in_p_offset;
1017*4882a593Smuzhiyun 		while (i--) {
1018*4882a593Smuzhiyun 			cvmx_write64_uint32(write_address, 0);
1019*4882a593Smuzhiyun 			__delay(10000);
1020*4882a593Smuzhiyun 		}
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun 		/*
1023*4882a593Smuzhiyun 		 * Read the IN_FIF_P_COUNT from the debug
1024*4882a593Smuzhiyun 		 * select. IN_FIF_P_COUNT can be unstable sometimes so
1025*4882a593Smuzhiyun 		 * read it twice with a write between the reads.  This
1026*4882a593Smuzhiyun 		 * read it twice with a write between the reads. This
1027*4882a593Smuzhiyun 		 * way we can tell the value is good, as it will
1028*4882a593Smuzhiyun 		 * increment by exactly one due to the write.
1029*4882a593Smuzhiyun 		cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
1030*4882a593Smuzhiyun 		cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
1031*4882a593Smuzhiyun 		do {
1032*4882a593Smuzhiyun 			dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
1033*4882a593Smuzhiyun 			old_in_fif_p_count = dbg_data.s.data & 0xff;
1034*4882a593Smuzhiyun 			cvmx_write64_uint32(write_address, 0);
1035*4882a593Smuzhiyun 			__delay(10000);
1036*4882a593Smuzhiyun 			dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
1037*4882a593Smuzhiyun 			in_fif_p_count = dbg_data.s.data & 0xff;
1038*4882a593Smuzhiyun 		} while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
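		/*
		 * Illustrative example (editorial, not from the original
		 * source): if the first read returns 0x42, the loop only
		 * exits when the read after one more write returns 0x43
		 * (mod 256); any other delta means the counter was unstable
		 * and we sample again.
		 */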
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 		/* Update in_fif_p_count for its offset with respect to out_p_count */
1041*4882a593Smuzhiyun 		in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 		/* Read the OUT_P_COUNT from the debug select */
1044*4882a593Smuzhiyun 		cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
1045*4882a593Smuzhiyun 		cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
1046*4882a593Smuzhiyun 		dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
1047*4882a593Smuzhiyun 		out_p_count = (dbg_data.s.data>>1) & 0xff;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 		/* Check that the two counters are aligned */
1050*4882a593Smuzhiyun 		if (out_p_count != in_fif_p_count) {
1051*4882a593Smuzhiyun 			cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
1052*4882a593Smuzhiyun 			while (in_fif_p_count != 0) {
1053*4882a593Smuzhiyun 				cvmx_write64_uint32(write_address, 0);
1054*4882a593Smuzhiyun 				__delay(10000);
1055*4882a593Smuzhiyun 				in_fif_p_count = (in_fif_p_count + 1) & 0xff;
1056*4882a593Smuzhiyun 			}
1057*4882a593Smuzhiyun 			/*
1058*4882a593Smuzhiyun 			 * The EBH5200 board swapped the PCIe reset
1059*4882a593Smuzhiyun 			 * lines on the board. This means we must
1060*4882a593Smuzhiyun 			 * bring both links down and up, which will
1061*4882a593Smuzhiyun 			 * cause the PCIe0 to need alignment
1062*4882a593Smuzhiyun 			 * again. Lots of messages will be displayed,
1063*4882a593Smuzhiyun 			 * but everything should work
1064*4882a593Smuzhiyun 			 */
1065*4882a593Smuzhiyun 			if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
1066*4882a593Smuzhiyun 				(pcie_port == 1))
1067*4882a593Smuzhiyun 				cvmx_pcie_rc_initialize(0);
1068*4882a593Smuzhiyun 			/* Retry bringing this port up */
1069*4882a593Smuzhiyun 			goto retry;
1070*4882a593Smuzhiyun 		}
1071*4882a593Smuzhiyun 	}
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	/* Display the link status */
1074*4882a593Smuzhiyun 	pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1075*4882a593Smuzhiyun 	cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	return 0;
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun /**
1081*4882a593Smuzhiyun  * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
1082*4882a593Smuzhiyun  * port from reset to a link up state. Software can then begin
1083*4882a593Smuzhiyun  * configuring the rest of the link.
1084*4882a593Smuzhiyun  *
1085*4882a593Smuzhiyun  * @pcie_port: PCIe port to initialize
1086*4882a593Smuzhiyun  *
1087*4882a593Smuzhiyun  * Returns Zero on success.
1088*4882a593Smuzhiyun  */
1089*4882a593Smuzhiyun static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
1090*4882a593Smuzhiyun {
1091*4882a593Smuzhiyun 	uint64_t start_cycle;
1092*4882a593Smuzhiyun 	union cvmx_pemx_ctl_status pem_ctl_status;
1093*4882a593Smuzhiyun 	union cvmx_pciercx_cfg032 pciercx_cfg032;
1094*4882a593Smuzhiyun 	union cvmx_pciercx_cfg448 pciercx_cfg448;
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	/* Bring up the link */
1097*4882a593Smuzhiyun 	pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
1098*4882a593Smuzhiyun 	pem_ctl_status.s.lnk_enb = 1;
1099*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	/* Wait for the link to come up */
1102*4882a593Smuzhiyun 	start_cycle = cvmx_get_cycle();
1103*4882a593Smuzhiyun 	do {
1104*4882a593Smuzhiyun 		if (cvmx_get_cycle() - start_cycle > octeon_get_clock_rate())
1105*4882a593Smuzhiyun 			return -1;
1106*4882a593Smuzhiyun 		__delay(10000);
1107*4882a593Smuzhiyun 		pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1108*4882a593Smuzhiyun 	} while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
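	/*
	 * Here dlla == 1 means the data link layer reports the link as
	 * active and lt == 0 means link training has finished; the loop
	 * above waits for both, bounded by roughly one second of cycles.
	 */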
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 	/*
1111*4882a593Smuzhiyun 	 * Update the Replay Time Limit. Empirically, some PCIe
1112*4882a593Smuzhiyun 	 * devices take a little longer to respond than expected under
1113*4882a593Smuzhiyun 	 * load. As a workaround for this we configure the Replay Time
1114*4882a593Smuzhiyun 	 * Limit to the value expected for a 512 byte MPS instead of
1115*4882a593Smuzhiyun 	 * our actual 256 byte MPS. The numbers below are directly
1116*4882a593Smuzhiyun 	 * from the PCIe spec table 3-4
1117*4882a593Smuzhiyun 	 */
1118*4882a593Smuzhiyun 	pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
1119*4882a593Smuzhiyun 	switch (pciercx_cfg032.s.nlw) {
1120*4882a593Smuzhiyun 	case 1: /* 1 lane */
1121*4882a593Smuzhiyun 		pciercx_cfg448.s.rtl = 1677;
1122*4882a593Smuzhiyun 		break;
1123*4882a593Smuzhiyun 	case 2: /* 2 lanes */
1124*4882a593Smuzhiyun 		pciercx_cfg448.s.rtl = 867;
1125*4882a593Smuzhiyun 		break;
1126*4882a593Smuzhiyun 	case 4: /* 4 lanes */
1127*4882a593Smuzhiyun 		pciercx_cfg448.s.rtl = 462;
1128*4882a593Smuzhiyun 		break;
1129*4882a593Smuzhiyun 	case 8: /* 8 lanes */
1130*4882a593Smuzhiyun 		pciercx_cfg448.s.rtl = 258;
1131*4882a593Smuzhiyun 		break;
1132*4882a593Smuzhiyun 	}
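	/*
	 * Editorial note: these rtl values follow the spec table's pattern
	 * of roughly doubling as the lane count halves, since a narrower
	 * link needs proportionally longer to replay the same TLP
	 * (1677 for x1 down to 258 for x8).
	 */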
1133*4882a593Smuzhiyun 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 	return 0;
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun /**
1140*4882a593Smuzhiyun  * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
1141*4882a593Smuzhiyun  * the bus.
1142*4882a593Smuzhiyun  *
1143*4882a593Smuzhiyun  * @pcie_port: PCIe port to initialize
1144*4882a593Smuzhiyun  *
1145*4882a593Smuzhiyun  * Returns Zero on success.
1146*4882a593Smuzhiyun  */
1147*4882a593Smuzhiyun static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
1148*4882a593Smuzhiyun {
1149*4882a593Smuzhiyun 	int i;
1150*4882a593Smuzhiyun 	union cvmx_ciu_soft_prst ciu_soft_prst;
1151*4882a593Smuzhiyun 	union cvmx_mio_rst_ctlx mio_rst_ctl;
1152*4882a593Smuzhiyun 	union cvmx_pemx_bar_ctl pemx_bar_ctl;
1153*4882a593Smuzhiyun 	union cvmx_pemx_ctl_status pemx_ctl_status;
1154*4882a593Smuzhiyun 	union cvmx_pemx_bist_status pemx_bist_status;
1155*4882a593Smuzhiyun 	union cvmx_pemx_bist_status2 pemx_bist_status2;
1156*4882a593Smuzhiyun 	union cvmx_pciercx_cfg032 pciercx_cfg032;
1157*4882a593Smuzhiyun 	union cvmx_pciercx_cfg515 pciercx_cfg515;
1158*4882a593Smuzhiyun 	union cvmx_sli_ctl_portx sli_ctl_portx;
1159*4882a593Smuzhiyun 	union cvmx_sli_mem_access_ctl sli_mem_access_ctl;
1160*4882a593Smuzhiyun 	union cvmx_sli_mem_access_subidx mem_access_subid;
1161*4882a593Smuzhiyun 	union cvmx_sriox_status_reg sriox_status_reg;
1162*4882a593Smuzhiyun 	union cvmx_pemx_bar1_indexx bar1_index;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	if (octeon_has_feature(OCTEON_FEATURE_SRIO)) {
1165*4882a593Smuzhiyun 		/* Make sure this interface isn't SRIO */
1166*4882a593Smuzhiyun 		if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
1167*4882a593Smuzhiyun 			/*
1168*4882a593Smuzhiyun 			 * The CN66XX requires reading the
1169*4882a593Smuzhiyun 			 * MIO_QLMX_CFG register to figure out the
1170*4882a593Smuzhiyun 			 * port type.
1171*4882a593Smuzhiyun 			 */
1172*4882a593Smuzhiyun 			union cvmx_mio_qlmx_cfg qlmx_cfg;
1173*4882a593Smuzhiyun 			qlmx_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(pcie_port));
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 			if (qlmx_cfg.s.qlm_spd == 15) {
1176*4882a593Smuzhiyun 				pr_notice("PCIe: Port %d is disabled, skipping.\n", pcie_port);
1177*4882a593Smuzhiyun 				return -1;
1178*4882a593Smuzhiyun 			}
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 			switch (qlmx_cfg.s.qlm_spd) {
1181*4882a593Smuzhiyun 			case 0x1: /* SRIO 1x4 short */
1182*4882a593Smuzhiyun 			case 0x3: /* SRIO 1x4 long */
1183*4882a593Smuzhiyun 			case 0x4: /* SRIO 2x2 short */
1184*4882a593Smuzhiyun 			case 0x6: /* SRIO 2x2 long */
1185*4882a593Smuzhiyun 				pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
1186*4882a593Smuzhiyun 				return -1;
1187*4882a593Smuzhiyun 			case 0x9: /* SGMII */
1188*4882a593Smuzhiyun 				pr_notice("PCIe: Port %d is SGMII, skipping.\n", pcie_port);
1189*4882a593Smuzhiyun 				return -1;
1190*4882a593Smuzhiyun 			case 0xb: /* XAUI */
1191*4882a593Smuzhiyun 				pr_notice("PCIe: Port %d is XAUI, skipping.\n", pcie_port);
1192*4882a593Smuzhiyun 				return -1;
1193*4882a593Smuzhiyun 			case 0x0: /* PCIE gen2 */
1194*4882a593Smuzhiyun 			case 0x8: /* PCIE gen2 (alias) */
1195*4882a593Smuzhiyun 			case 0x2: /* PCIE gen1 */
1196*4882a593Smuzhiyun 			case 0xa: /* PCIE gen1 (alias) */
1197*4882a593Smuzhiyun 				break;
1198*4882a593Smuzhiyun 			default:
1199*4882a593Smuzhiyun 				pr_notice("PCIe: Port %d is unknown, skipping.\n", pcie_port);
1200*4882a593Smuzhiyun 				return -1;
1201*4882a593Smuzhiyun 			}
1202*4882a593Smuzhiyun 		} else {
1203*4882a593Smuzhiyun 			sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(pcie_port));
1204*4882a593Smuzhiyun 			if (sriox_status_reg.s.srio) {
1205*4882a593Smuzhiyun 				pr_notice("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
1206*4882a593Smuzhiyun 				return -1;
1207*4882a593Smuzhiyun 			}
1208*4882a593Smuzhiyun 		}
1209*4882a593Smuzhiyun 	}
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun #if 0
1212*4882a593Smuzhiyun     /* This code is so that the PCIe analyzer is able to see 63XX traffic */
1213*4882a593Smuzhiyun 	pr_notice("PCIE : init for pcie analyzer.\n");
1214*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_init();
1215*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
1216*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
1217*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
1218*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
1219*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
1220*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
1221*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
1222*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
1223*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
1224*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
1225*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
1226*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
1227*4882a593Smuzhiyun 	cvmx_helper_qlm_jtag_update(pcie_port);
1228*4882a593Smuzhiyun #endif
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	/* Make sure we aren't trying to set up a target mode interface in host mode */
1231*4882a593Smuzhiyun 	mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
1232*4882a593Smuzhiyun 	if (!mio_rst_ctl.s.host_mode) {
1233*4882a593Smuzhiyun 		pr_notice("PCIe: Port %d in endpoint mode.\n", pcie_port);
1234*4882a593Smuzhiyun 		return -1;
1235*4882a593Smuzhiyun 	}
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	/* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
1238*4882a593Smuzhiyun 	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
1239*4882a593Smuzhiyun 		if (pcie_port) {
1240*4882a593Smuzhiyun 			union cvmx_ciu_qlm ciu_qlm;
1241*4882a593Smuzhiyun 			ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
1242*4882a593Smuzhiyun 			ciu_qlm.s.txbypass = 1;
1243*4882a593Smuzhiyun 			ciu_qlm.s.txdeemph = 5;
1244*4882a593Smuzhiyun 			ciu_qlm.s.txmargin = 0x17;
1245*4882a593Smuzhiyun 			cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
1246*4882a593Smuzhiyun 		} else {
1247*4882a593Smuzhiyun 			union cvmx_ciu_qlm ciu_qlm;
1248*4882a593Smuzhiyun 			ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
1249*4882a593Smuzhiyun 			ciu_qlm.s.txbypass = 1;
1250*4882a593Smuzhiyun 			ciu_qlm.s.txdeemph = 5;
1251*4882a593Smuzhiyun 			ciu_qlm.s.txmargin = 0x17;
1252*4882a593Smuzhiyun 			cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
1253*4882a593Smuzhiyun 		}
1254*4882a593Smuzhiyun 	}
1255*4882a593Smuzhiyun 	/* Bring the PCIe out of reset */
1256*4882a593Smuzhiyun 	if (pcie_port)
1257*4882a593Smuzhiyun 		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1258*4882a593Smuzhiyun 	else
1259*4882a593Smuzhiyun 		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1260*4882a593Smuzhiyun 	/*
1261*4882a593Smuzhiyun 	 * After a chip reset the PCIe will also be in reset. If it
1262*4882a593Smuzhiyun 	 * isn't, most likely someone is trying to init it again
1263*4882a593Smuzhiyun 	 * without a proper PCIe reset
1264*4882a593Smuzhiyun 	 */
1265*4882a593Smuzhiyun 	if (ciu_soft_prst.s.soft_prst == 0) {
1266*4882a593Smuzhiyun 		/* Reset the port */
1267*4882a593Smuzhiyun 		ciu_soft_prst.s.soft_prst = 1;
1268*4882a593Smuzhiyun 		if (pcie_port)
1269*4882a593Smuzhiyun 			cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1270*4882a593Smuzhiyun 		else
1271*4882a593Smuzhiyun 			cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1272*4882a593Smuzhiyun 		/* Wait until pcie resets the ports. */
1273*4882a593Smuzhiyun 		udelay(2000);
1274*4882a593Smuzhiyun 	}
1275*4882a593Smuzhiyun 	if (pcie_port) {
1276*4882a593Smuzhiyun 		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
1277*4882a593Smuzhiyun 		ciu_soft_prst.s.soft_prst = 0;
1278*4882a593Smuzhiyun 		cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
1279*4882a593Smuzhiyun 	} else {
1280*4882a593Smuzhiyun 		ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
1281*4882a593Smuzhiyun 		ciu_soft_prst.s.soft_prst = 0;
1282*4882a593Smuzhiyun 		cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
1283*4882a593Smuzhiyun 	}
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	/* Wait for PCIe reset to complete */
1286*4882a593Smuzhiyun 	udelay(1000);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	/*
1289*4882a593Smuzhiyun 	 * Check and make sure PCIe came out of reset. If it doesn't
1290*4882a593Smuzhiyun 	 * the board probably hasn't wired the clocks up and the
1291*4882a593Smuzhiyun 	 * interface should be skipped.
1292*4882a593Smuzhiyun 	 */
1293*4882a593Smuzhiyun 	if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), union cvmx_mio_rst_ctlx, rst_done, ==, 1, 10000)) {
1294*4882a593Smuzhiyun 		pr_notice("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
1295*4882a593Smuzhiyun 		return -1;
1296*4882a593Smuzhiyun 	}
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	/* Check BIST status */
1299*4882a593Smuzhiyun 	pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
1300*4882a593Smuzhiyun 	if (pemx_bist_status.u64)
1301*4882a593Smuzhiyun 		pr_notice("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
1302*4882a593Smuzhiyun 	pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
1303*4882a593Smuzhiyun 	/* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */
1304*4882a593Smuzhiyun 	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
1305*4882a593Smuzhiyun 		pemx_bist_status2.u64 &= ~0x3full;
1306*4882a593Smuzhiyun 	if (pemx_bist_status2.u64)
1307*4882a593Smuzhiyun 		pr_notice("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	/* Initialize the config space CSRs */
1310*4882a593Smuzhiyun 	__cvmx_pcie_rc_initialize_config_space(pcie_port);
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	/* Enable gen2 speed selection */
1313*4882a593Smuzhiyun 	pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
1314*4882a593Smuzhiyun 	pciercx_cfg515.s.dsc = 1;
1315*4882a593Smuzhiyun 	cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
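	/*
	 * Editorial note: dsc here is understood to request a directed
	 * speed change, letting the link train up to gen2 rates instead
	 * of staying at the gen1 boot speed.
	 */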
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	/* Bring the link up */
1318*4882a593Smuzhiyun 	if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
1319*4882a593Smuzhiyun 		/*
1320*4882a593Smuzhiyun 		 * Some gen1 devices don't handle the gen 2 training
1321*4882a593Smuzhiyun 		 * correctly. Disable gen2 and try again with only
1322*4882a593Smuzhiyun 		 * gen1
1323*4882a593Smuzhiyun 		 */
1324*4882a593Smuzhiyun 		union cvmx_pciercx_cfg031 pciercx_cfg031;
1325*4882a593Smuzhiyun 		pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
1326*4882a593Smuzhiyun 		pciercx_cfg031.s.mls = 1;
1327*4882a593Smuzhiyun 		cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
1328*4882a593Smuzhiyun 		if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port)) {
1329*4882a593Smuzhiyun 			pr_notice("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
1330*4882a593Smuzhiyun 			return -1;
1331*4882a593Smuzhiyun 		}
1332*4882a593Smuzhiyun 	}
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	/* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
1335*4882a593Smuzhiyun 	sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
1336*4882a593Smuzhiyun 	sli_mem_access_ctl.s.max_word = 0;	/* Allow 16 words to combine */
1337*4882a593Smuzhiyun 	sli_mem_access_ctl.s.timer = 127;	/* Wait up to 127 cycles for more data */
1338*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	/* Setup Mem access SubDIDs */
1341*4882a593Smuzhiyun 	mem_access_subid.u64 = 0;
1342*4882a593Smuzhiyun 	mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
1343*4882a593Smuzhiyun 	mem_access_subid.s.nmerge = 0;	/* Allow merging as it works on CN6XXX. */
1344*4882a593Smuzhiyun 	mem_access_subid.s.esr = 1;	/* Endian-swap for Reads. */
1345*4882a593Smuzhiyun 	mem_access_subid.s.esw = 1;	/* Endian-swap for Writes. */
1346*4882a593Smuzhiyun 	mem_access_subid.s.wtype = 0;	/* "No snoop" and "Relaxed ordering" are not set */
1347*4882a593Smuzhiyun 	mem_access_subid.s.rtype = 0;	/* "No snoop" and "Relaxed ordering" are not set */
1348*4882a593Smuzhiyun 	/* PCIe Address Bits <63:34>. */
1349*4882a593Smuzhiyun 	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1350*4882a593Smuzhiyun 		mem_access_subid.cn68xx.ba = 0;
1351*4882a593Smuzhiyun 	else
1352*4882a593Smuzhiyun 		mem_access_subid.s.ba = 0;
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	/*
1355*4882a593Smuzhiyun 	 * Setup mem access 12-15 for port 0, 16-19 for port 1,
1356*4882a593Smuzhiyun 	 * supplying 36 bits of address space.
1357*4882a593Smuzhiyun 	 */
1358*4882a593Smuzhiyun 	for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++) {
1359*4882a593Smuzhiyun 		cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
1360*4882a593Smuzhiyun 		/* Set each SUBID to extend the addressable range */
1361*4882a593Smuzhiyun 		__cvmx_increment_ba(&mem_access_subid);
1362*4882a593Smuzhiyun 	}
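	/*
	 * Worked math (editorial): the BA field supplies PCIe address
	 * bits <63:34>, so each SUBID entry decodes a 2^34-byte window;
	 * four consecutive entries per port give 4 * 2^34 = 2^36 bytes,
	 * the "36 bits of address space" mentioned above.
	 */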
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	/*
1365*4882a593Smuzhiyun 	 * Disable the peer to peer forwarding register. This must be
1366*4882a593Smuzhiyun 	 * set up by the OS after it enumerates the bus and assigns
1367*4882a593Smuzhiyun 	 * addresses to the PCIe busses.
1368*4882a593Smuzhiyun 	 */
1369*4882a593Smuzhiyun 	for (i = 0; i < 4; i++) {
1370*4882a593Smuzhiyun 		cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
1371*4882a593Smuzhiyun 		cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
1372*4882a593Smuzhiyun 	}
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	/* Set Octeon's BAR0 to decode 0-16KB. It overlaps with BAR2 */
1375*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	/*
1378*4882a593Smuzhiyun 	 * Set Octeon's BAR2 to decode 0-2^41. BAR0 and BAR1 take
1379*4882a593Smuzhiyun 	 * precedence where they overlap. It also overlaps with the
1380*4882a593Smuzhiyun 	 * device addresses, so make sure the peer to peer forwarding
1381*4882a593Smuzhiyun 	 * is set right.
1382*4882a593Smuzhiyun 	 */
1383*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	/*
1386*4882a593Smuzhiyun 	 * Setup BAR2 attributes
1387*4882a593Smuzhiyun 	 * Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM])
1388*4882a593Smuzhiyun 	 * - PTLP_RO,CTLP_RO should normally be set (except for debug).
1389*4882a593Smuzhiyun 	 * - WAIT_COM=0 will likely work for all applications.
1390*4882a593Smuzhiyun 	 * Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM])
1391*4882a593Smuzhiyun 	 */
1392*4882a593Smuzhiyun 	pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
1393*4882a593Smuzhiyun 	pemx_bar_ctl.s.bar1_siz = 3;  /* 256MB BAR1 */
1394*4882a593Smuzhiyun 	pemx_bar_ctl.s.bar2_enb = 1;
1395*4882a593Smuzhiyun 	pemx_bar_ctl.s.bar2_esx = 1;
1396*4882a593Smuzhiyun 	pemx_bar_ctl.s.bar2_cax = 0;
1397*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
1398*4882a593Smuzhiyun 	sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
1399*4882a593Smuzhiyun 	sli_ctl_portx.s.ptlp_ro = 1;
1400*4882a593Smuzhiyun 	sli_ctl_portx.s.ctlp_ro = 1;
1401*4882a593Smuzhiyun 	sli_ctl_portx.s.wait_com = 0;
1402*4882a593Smuzhiyun 	sli_ctl_portx.s.waitl_com = 0;
1403*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	/* BAR1 follows BAR2 */
1406*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	bar1_index.u64 = 0;
1409*4882a593Smuzhiyun 	bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
1410*4882a593Smuzhiyun 	bar1_index.s.ca = 1;	   /* Not Cached */
1411*4882a593Smuzhiyun 	bar1_index.s.end_swp = 1;  /* Endian Swap mode */
1412*4882a593Smuzhiyun 	bar1_index.s.addr_v = 1;   /* Valid entry */
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	for (i = 0; i < 16; i++) {
1415*4882a593Smuzhiyun 		cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
1416*4882a593Smuzhiyun 		/* 256MB / 16 >> 22 == 4 */
1417*4882a593Smuzhiyun 		bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
1418*4882a593Smuzhiyun 	}
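	/*
	 * Worked math (editorial): BAR1 is 256MB (1 << 28) split across
	 * 16 index entries, so each entry maps 16MB; addr_idx counts in
	 * 4MB (1 << 22) units, hence the per-entry increment of
	 * 16MB >> 22 == 4 noted above.
	 */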
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	/*
1421*4882a593Smuzhiyun 	 * Allow config retries for 250ms. The count is based on the 5GHz
1422*4882a593Smuzhiyun 	 * SERDES clock.
1423*4882a593Smuzhiyun 	 */
1424*4882a593Smuzhiyun 	pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
1425*4882a593Smuzhiyun 	pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
1426*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
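	/*
	 * Worked math (editorial): 250ms at 5GHz is 250 * 5,000,000
	 * cycles = 1.25e9; the division by 0x10000 suggests CFG_RTRY
	 * counts in 65536-cycle units, so the field is set to roughly
	 * 19073.
	 */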
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	/* Display the link status */
1429*4882a593Smuzhiyun 	pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
1430*4882a593Smuzhiyun 	pr_notice("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	return 0;
1433*4882a593Smuzhiyun }
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun /**
1436*4882a593Smuzhiyun  * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
1437*4882a593Smuzhiyun  *
1438*4882a593Smuzhiyun  * @pcie_port: PCIe port to initialize
1439*4882a593Smuzhiyun  *
1440*4882a593Smuzhiyun  * Returns Zero on success
1441*4882a593Smuzhiyun  */
1442*4882a593Smuzhiyun static int cvmx_pcie_rc_initialize(int pcie_port)
1443*4882a593Smuzhiyun {
1444*4882a593Smuzhiyun 	int result;
1445*4882a593Smuzhiyun 	if (octeon_has_feature(OCTEON_FEATURE_NPEI))
1446*4882a593Smuzhiyun 		result = __cvmx_pcie_rc_initialize_gen1(pcie_port);
1447*4882a593Smuzhiyun 	else
1448*4882a593Smuzhiyun 		result = __cvmx_pcie_rc_initialize_gen2(pcie_port);
1449*4882a593Smuzhiyun 	return result;
1450*4882a593Smuzhiyun }
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun /* Above was cvmx-pcie.c, below original pcie.c */
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun /**
1455*4882a593Smuzhiyun  * Map a PCI device to the appropriate interrupt line
1456*4882a593Smuzhiyun  *
1457*4882a593Smuzhiyun  * @dev:    The Linux PCI device structure for the device to map
1458*4882a593Smuzhiyun  * @slot:   The slot number for this device on __BUS 0__. Linux
1459*4882a593Smuzhiyun  *		 enumerates through all the bridges and figures out the
1460*4882a593Smuzhiyun  *		 slot on Bus 0 where this device eventually hooks to.
1461*4882a593Smuzhiyun  * @pin:    The PCI interrupt pin read from the device, then swizzled
1462*4882a593Smuzhiyun  *		 as it goes through each bridge.
1463*4882a593Smuzhiyun  * Returns Interrupt number for the device
1464*4882a593Smuzhiyun  */
1465*4882a593Smuzhiyun int octeon_pcie_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1466*4882a593Smuzhiyun {
1467*4882a593Smuzhiyun 	/*
1468*4882a593Smuzhiyun 	 * The EBH5600 board with the PCI to PCIe bridge mistakenly
1469*4882a593Smuzhiyun 	 * wires the first slot for both device id 2 and interrupt
1470*4882a593Smuzhiyun 	 * A. According to the PCI spec, device id 2 should be C. The
1471*4882a593Smuzhiyun 	 * following kludge attempts to fix this.
1472*4882a593Smuzhiyun 	 */
1473*4882a593Smuzhiyun 	if (strstr(octeon_board_type_string(), "EBH5600") &&
1474*4882a593Smuzhiyun 	    dev->bus && dev->bus->parent) {
1475*4882a593Smuzhiyun 		/*
1476*4882a593Smuzhiyun 		 * Iterate all the way up the device chain and find
1477*4882a593Smuzhiyun 		 * the root bus.
1478*4882a593Smuzhiyun 		 */
1479*4882a593Smuzhiyun 		while (dev->bus && dev->bus->parent)
1480*4882a593Smuzhiyun 			dev = to_pci_dev(dev->bus->bridge);
1481*4882a593Smuzhiyun 		/*
1482*4882a593Smuzhiyun 		 * If the root bus is number 1 (bus 0 is taken by the
1483*4882a593Smuzhiyun 		 * dummy controller) and the PEX 8114 is the root, assume
1484*4882a593Smuzhiyun 		 * we are behind the miswired bus. We need to correct the swizzle level by two. Yuck.
1485*4882a593Smuzhiyun 		 */
1486*4882a593Smuzhiyun 		if ((dev->bus->number == 1) &&
1487*4882a593Smuzhiyun 		    (dev->vendor == 0x10b5) && (dev->device == 0x8114)) {
1488*4882a593Smuzhiyun 			/*
1489*4882a593Smuzhiyun 			 * The pin field is one based, not zero. We
1490*4882a593Smuzhiyun 			 * need to swizzle it by minus two.
1491*4882a593Smuzhiyun 			 */
1492*4882a593Smuzhiyun 			pin = ((pin - 3) & 3) + 1;
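			/*
			 * Worked example (editorial): ((pin - 3) & 3) + 1
			 * maps INTA(1)->INTC(3), INTB(2)->INTD(4),
			 * INTC(3)->INTA(1) and INTD(4)->INTB(2), i.e. the
			 * two-step rotation described above.
			 */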
1493*4882a593Smuzhiyun 		}
1494*4882a593Smuzhiyun 	}
1495*4882a593Smuzhiyun 	/*
1496*4882a593Smuzhiyun 	 * The -1 is because pin starts with one, not zero. It might
1497*4882a593Smuzhiyun 	 * be that this equation needs to include the slot number, but
1498*4882a593Smuzhiyun 	 * I don't have hardware to check that against.
1499*4882a593Smuzhiyun 	 */
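	/*
	 * Example (editorial): an unswizzled INTA (pin == 1) maps to
	 * OCTEON_IRQ_PCI_INT0, INTB (pin == 2) to the next IRQ up, etc.
	 */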
1500*4882a593Smuzhiyun 	return pin - 1 + OCTEON_IRQ_PCI_INT0;
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun static void set_cfg_read_retry(u32 retry_cnt)
1504*4882a593Smuzhiyun {
1505*4882a593Smuzhiyun 	union cvmx_pemx_ctl_status pemx_ctl;
1506*4882a593Smuzhiyun 	pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
1507*4882a593Smuzhiyun 	pemx_ctl.s.cfg_rtry = retry_cnt;
1508*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), pemx_ctl.u64);
1509*4882a593Smuzhiyun }
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun static u32 disable_cfg_read_retry(void)
1513*4882a593Smuzhiyun {
1514*4882a593Smuzhiyun 	u32 retry_cnt;
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	union cvmx_pemx_ctl_status pemx_ctl;
1517*4882a593Smuzhiyun 	pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
1518*4882a593Smuzhiyun 	retry_cnt =  pemx_ctl.s.cfg_rtry;
1519*4882a593Smuzhiyun 	pemx_ctl.s.cfg_rtry = 0;
1520*4882a593Smuzhiyun 	cvmx_write_csr(CVMX_PEMX_CTL_STATUS(1), pemx_ctl.u64);
1521*4882a593Smuzhiyun 	return retry_cnt;
1522*4882a593Smuzhiyun }
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun static int is_cfg_retry(void)
1525*4882a593Smuzhiyun {
1526*4882a593Smuzhiyun 	union cvmx_pemx_int_sum pemx_int_sum;
1527*4882a593Smuzhiyun 	pemx_int_sum.u64 = cvmx_read_csr(CVMX_PEMX_INT_SUM(1));
1528*4882a593Smuzhiyun 	if (pemx_int_sum.s.crs_dr)
1529*4882a593Smuzhiyun 		return 1;
1530*4882a593Smuzhiyun 	return 0;
1531*4882a593Smuzhiyun }
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun /*
1534*4882a593Smuzhiyun  * Read a value from PCI configuration space.
1535*4882a593Smuzhiyun  */
1537*4882a593Smuzhiyun static int octeon_pcie_read_config(unsigned int pcie_port, struct pci_bus *bus,
1538*4882a593Smuzhiyun 				   unsigned int devfn, int reg, int size,
1539*4882a593Smuzhiyun 				   u32 *val)
1540*4882a593Smuzhiyun {
1541*4882a593Smuzhiyun 	union octeon_cvmemctl cvmmemctl;
1542*4882a593Smuzhiyun 	union octeon_cvmemctl cvmmemctl_save;
1543*4882a593Smuzhiyun 	int bus_number = bus->number;
1544*4882a593Smuzhiyun 	int cfg_retry = 0;
1545*4882a593Smuzhiyun 	int retry_cnt = 0;
1546*4882a593Smuzhiyun 	int max_retry_cnt = 10;
1547*4882a593Smuzhiyun 	u32 cfg_retry_cnt = 0;
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	cvmmemctl_save.u64 = 0;
1550*4882a593Smuzhiyun 	BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war));
1551*4882a593Smuzhiyun 	/*
1552*4882a593Smuzhiyun 	 * For the top level bus make sure our hardware bus number
1553*4882a593Smuzhiyun 	 * matches the software one
1554*4882a593Smuzhiyun 	 */
1555*4882a593Smuzhiyun 	if (bus->parent == NULL) {
1556*4882a593Smuzhiyun 		if (enable_pcie_bus_num_war[pcie_port])
1557*4882a593Smuzhiyun 			bus_number = 0;
1558*4882a593Smuzhiyun 		else {
1559*4882a593Smuzhiyun 			union cvmx_pciercx_cfg006 pciercx_cfg006;
1560*4882a593Smuzhiyun 			pciercx_cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port,
1561*4882a593Smuzhiyun 					     CVMX_PCIERCX_CFG006(pcie_port));
1562*4882a593Smuzhiyun 			if (pciercx_cfg006.s.pbnum != bus_number) {
1563*4882a593Smuzhiyun 				pciercx_cfg006.s.pbnum = bus_number;
1564*4882a593Smuzhiyun 				pciercx_cfg006.s.sbnum = bus_number;
1565*4882a593Smuzhiyun 				pciercx_cfg006.s.subbnum = bus_number;
1566*4882a593Smuzhiyun 				cvmx_pcie_cfgx_write(pcie_port,
1567*4882a593Smuzhiyun 					    CVMX_PCIERCX_CFG006(pcie_port),
1568*4882a593Smuzhiyun 					    pciercx_cfg006.u32);
1569*4882a593Smuzhiyun 			}
1570*4882a593Smuzhiyun 		}
1571*4882a593Smuzhiyun 	}
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	/*
1574*4882a593Smuzhiyun 	 * PCIe only has a single device connected to Octeon. It is
1575*4882a593Smuzhiyun 	 * always device ID 0. Don't bother doing reads for other
1576*4882a593Smuzhiyun 	 * device IDs on the first segment.
1577*4882a593Smuzhiyun 	 */
1578*4882a593Smuzhiyun 	if ((bus->parent == NULL) && (devfn >> 3 != 0))
1579*4882a593Smuzhiyun 		return PCIBIOS_FUNC_NOT_SUPPORTED;
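	/*
	 * Editorial note: devfn packs the device number in bits 7:3 and
	 * the function number in bits 2:0, so devfn >> 3 selects the
	 * device and devfn & 0x7 the function throughout this file.
	 */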
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	/*
1582*4882a593Smuzhiyun 	 * The following is a workaround for the CN57XX, CN56XX,
1583*4882a593Smuzhiyun 	 * CN55XX, and CN54XX errata with PCIe config reads from
1584*4882a593Smuzhiyun 	 * non-existent devices. These chips will hang the PCIe link if a
1585*4882a593Smuzhiyun 	 * config read is performed that causes a UR response.
1586*4882a593Smuzhiyun 	 */
1587*4882a593Smuzhiyun 	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
1588*4882a593Smuzhiyun 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1)) {
1589*4882a593Smuzhiyun 		/*
1590*4882a593Smuzhiyun 		 * For our EBH5600 board, port 0 has a bridge with two
1591*4882a593Smuzhiyun 		 * PCI-X slots. We need special checks to make
1592*4882a593Smuzhiyun 		 * sure we only probe valid stuff. The PCIe->PCI-X
1593*4882a593Smuzhiyun 		 * bridge only responds to device ID 0, functions
1594*4882a593Smuzhiyun 		 * 0-1.
1595*4882a593Smuzhiyun 		 */
1596*4882a593Smuzhiyun 		if ((bus->parent == NULL) && (devfn >= 2))
1597*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1598*4882a593Smuzhiyun 		/*
1599*4882a593Smuzhiyun 		 * The PCI-X slots are device ID 2,3. Choose one of
1600*4882a593Smuzhiyun 		 * the below "if" blocks based on what is plugged into
1601*4882a593Smuzhiyun 		 * the board.
1602*4882a593Smuzhiyun 		 */
1603*4882a593Smuzhiyun #if 1
1604*4882a593Smuzhiyun 		/* Use this option if you aren't using either slot */
1605*4882a593Smuzhiyun 		if (bus_number == 2)
1606*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1607*4882a593Smuzhiyun #elif 0
1608*4882a593Smuzhiyun 		/*
1609*4882a593Smuzhiyun 		 * Use this option if you are using the first slot but
1610*4882a593Smuzhiyun 		 * not the second.
1611*4882a593Smuzhiyun 		 */
1612*4882a593Smuzhiyun 		if ((bus_number == 2) && (devfn >> 3 != 2))
1613*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1614*4882a593Smuzhiyun #elif 0
1615*4882a593Smuzhiyun 		/*
1616*4882a593Smuzhiyun 		 * Use this option if you are using the second slot
1617*4882a593Smuzhiyun 		 * but not the first.
1618*4882a593Smuzhiyun 		 */
1619*4882a593Smuzhiyun 		if ((bus_number == 2) && (devfn >> 3 != 3))
1620*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1621*4882a593Smuzhiyun #elif 0
1622*4882a593Smuzhiyun 		/* Use this option if you are using both slots */
1623*4882a593Smuzhiyun 		if ((bus_number == 2) &&
1624*4882a593Smuzhiyun 		    !((devfn == (2 << 3)) || (devfn == (3 << 3))))
1625*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1626*4882a593Smuzhiyun #endif
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 		/* The following #if gives a more complicated example. These are
1629*4882a593Smuzhiyun 		   the required checks for running a Nitrox CN16XX-NHBX in the
1630*4882a593Smuzhiyun 		   slot of the EBH5600. This card has a PLX PCIe bridge with
1631*4882a593Smuzhiyun 		   four Nitrox PLX parts behind it */
1632*4882a593Smuzhiyun #if 0
1633*4882a593Smuzhiyun 		/* PLX bridge with 4 ports */
1634*4882a593Smuzhiyun 		if ((bus_number == 4) &&
1635*4882a593Smuzhiyun 		    !((devfn >> 3 >= 1) && (devfn >> 3 <= 4)))
1636*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1637*4882a593Smuzhiyun 		/* Nitrox behind PLX 1 */
1638*4882a593Smuzhiyun 		if ((bus_number == 5) && (devfn >> 3 != 0))
1639*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1640*4882a593Smuzhiyun 		/* Nitrox behind PLX 2 */
1641*4882a593Smuzhiyun 		if ((bus_number == 6) && (devfn >> 3 != 0))
1642*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1643*4882a593Smuzhiyun 		/* Nitrox behind PLX 3 */
1644*4882a593Smuzhiyun 		if ((bus_number == 7) && (devfn >> 3 != 0))
1645*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1646*4882a593Smuzhiyun 		/* Nitrox behind PLX 4 */
1647*4882a593Smuzhiyun 		if ((bus_number == 8) && (devfn >> 3 != 0))
1648*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1649*4882a593Smuzhiyun #endif
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 		/*
1652*4882a593Smuzhiyun 		 * config reads from non-existent devices happen
1653*4882a593Smuzhiyun 		 * config reads from non existent devices happen
1654*4882a593Smuzhiyun 		 * faster. This allows us to continue booting even if
1655*4882a593Smuzhiyun 		 * the above "if" checks are wrong.  Once one of these
1656*4882a593Smuzhiyun 		 * errors happens, the PCIe port is dead.
1657*4882a593Smuzhiyun 		 */
1658*4882a593Smuzhiyun 		cvmmemctl_save.u64 = __read_64bit_c0_register($11, 7);
1659*4882a593Smuzhiyun 		cvmmemctl.u64 = cvmmemctl_save.u64;
1660*4882a593Smuzhiyun 		cvmmemctl.s.didtto = 2;
1661*4882a593Smuzhiyun 		__write_64bit_c0_register($11, 7, cvmmemctl.u64);
1662*4882a593Smuzhiyun 	}
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 	if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war))
1665*4882a593Smuzhiyun 		cfg_retry_cnt = disable_cfg_read_retry();
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	pr_debug("pcie_cfg_rd port=%d b=%d devfn=0x%03x reg=0x%03x"
1668*4882a593Smuzhiyun 		 " size=%d ", pcie_port, bus_number, devfn, reg, size);
1669*4882a593Smuzhiyun 	do {
1670*4882a593Smuzhiyun 		switch (size) {
1671*4882a593Smuzhiyun 		case 4:
1672*4882a593Smuzhiyun 			*val = cvmx_pcie_config_read32(pcie_port, bus_number,
1673*4882a593Smuzhiyun 				devfn >> 3, devfn & 0x7, reg);
1674*4882a593Smuzhiyun 			break;
1675*4882a593Smuzhiyun 		case 2:
1676*4882a593Smuzhiyun 			*val = cvmx_pcie_config_read16(pcie_port, bus_number,
1677*4882a593Smuzhiyun 				devfn >> 3, devfn & 0x7, reg);
1678*4882a593Smuzhiyun 			break;
1679*4882a593Smuzhiyun 		case 1:
1680*4882a593Smuzhiyun 			*val = cvmx_pcie_config_read8(pcie_port, bus_number,
1681*4882a593Smuzhiyun 				devfn >> 3, devfn & 0x7, reg);
1682*4882a593Smuzhiyun 			break;
1683*4882a593Smuzhiyun 		default:
1684*4882a593Smuzhiyun 			if (OCTEON_IS_MODEL(OCTEON_CN63XX))
1685*4882a593Smuzhiyun 				set_cfg_read_retry(cfg_retry_cnt);
1686*4882a593Smuzhiyun 			return PCIBIOS_FUNC_NOT_SUPPORTED;
1687*4882a593Smuzhiyun 		}
1688*4882a593Smuzhiyun 		if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) &&
1689*4882a593Smuzhiyun 			(enable_pcie_14459_war)) {
1690*4882a593Smuzhiyun 			cfg_retry = is_cfg_retry();
1691*4882a593Smuzhiyun 			retry_cnt++;
1692*4882a593Smuzhiyun 			if (retry_cnt > max_retry_cnt) {
1693*4882a593Smuzhiyun 				pr_err(" pcie cfg_read retries failed. retry_cnt=%d\n",
1694*4882a593Smuzhiyun 				       retry_cnt);
1695*4882a593Smuzhiyun 				cfg_retry = 0;
1696*4882a593Smuzhiyun 			}
1697*4882a593Smuzhiyun 		}
1698*4882a593Smuzhiyun 	} while (cfg_retry);
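	/*
	 * Editorial summary of the PCIE-14459 workaround above: with
	 * hardware config retries disabled, the read is replayed in
	 * software while PEMX_INT_SUM[CRS_DR] reports a configuration
	 * retry status (CRS) completion, giving up after max_retry_cnt
	 * attempts instead of retrying forever.
	 */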
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	if ((OCTEON_IS_MODEL(OCTEON_CN63XX)) && (enable_pcie_14459_war))
1701*4882a593Smuzhiyun 		set_cfg_read_retry(cfg_retry_cnt);
1702*4882a593Smuzhiyun 	pr_debug("val=%08x  : tries=%02d\n", *val, retry_cnt);
1703*4882a593Smuzhiyun 	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1) ||
1704*4882a593Smuzhiyun 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_1))
1705*4882a593Smuzhiyun 		write_c0_cvmmemctl(cvmmemctl_save.u64);
1706*4882a593Smuzhiyun 	return PCIBIOS_SUCCESSFUL;
1707*4882a593Smuzhiyun }
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun static int octeon_pcie0_read_config(struct pci_bus *bus, unsigned int devfn,
1710*4882a593Smuzhiyun 				    int reg, int size, u32 *val)
1711*4882a593Smuzhiyun {
1712*4882a593Smuzhiyun 	return octeon_pcie_read_config(0, bus, devfn, reg, size, val);
1713*4882a593Smuzhiyun }
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun static int octeon_pcie1_read_config(struct pci_bus *bus, unsigned int devfn,
1716*4882a593Smuzhiyun 				    int reg, int size, u32 *val)
1717*4882a593Smuzhiyun {
1718*4882a593Smuzhiyun 	return octeon_pcie_read_config(1, bus, devfn, reg, size, val);
1719*4882a593Smuzhiyun }
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun static int octeon_dummy_read_config(struct pci_bus *bus, unsigned int devfn,
1722*4882a593Smuzhiyun 				    int reg, int size, u32 *val)
1723*4882a593Smuzhiyun {
1724*4882a593Smuzhiyun 	return PCIBIOS_FUNC_NOT_SUPPORTED;
1725*4882a593Smuzhiyun }
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun /*
1728*4882a593Smuzhiyun  * Write a value to PCI configuration space
1729*4882a593Smuzhiyun  */
1730*4882a593Smuzhiyun static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
1731*4882a593Smuzhiyun 				    unsigned int devfn, int reg,
1732*4882a593Smuzhiyun 				    int size, u32 val)
1733*4882a593Smuzhiyun {
1734*4882a593Smuzhiyun 	int bus_number = bus->number;
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	BUG_ON(pcie_port >= ARRAY_SIZE(enable_pcie_bus_num_war));
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 	if ((bus->parent == NULL) && (enable_pcie_bus_num_war[pcie_port]))
1739*4882a593Smuzhiyun 		bus_number = 0;
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 	pr_debug("pcie_cfg_wr port=%d b=%d devfn=0x%03x"
1742*4882a593Smuzhiyun 		 " reg=0x%03x size=%d val=%08x\n", pcie_port, bus_number, devfn,
1743*4882a593Smuzhiyun 		 reg, size, val);
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun 	switch (size) {
1747*4882a593Smuzhiyun 	case 4:
1748*4882a593Smuzhiyun 		cvmx_pcie_config_write32(pcie_port, bus_number, devfn >> 3,
1749*4882a593Smuzhiyun 					 devfn & 0x7, reg, val);
1750*4882a593Smuzhiyun 		break;
1751*4882a593Smuzhiyun 	case 2:
1752*4882a593Smuzhiyun 		cvmx_pcie_config_write16(pcie_port, bus_number, devfn >> 3,
1753*4882a593Smuzhiyun 					 devfn & 0x7, reg, val);
1754*4882a593Smuzhiyun 		break;
1755*4882a593Smuzhiyun 	case 1:
1756*4882a593Smuzhiyun 		cvmx_pcie_config_write8(pcie_port, bus_number, devfn >> 3,
1757*4882a593Smuzhiyun 					devfn & 0x7, reg, val);
1758*4882a593Smuzhiyun 		break;
1759*4882a593Smuzhiyun 	default:
1760*4882a593Smuzhiyun 		return PCIBIOS_FUNC_NOT_SUPPORTED;
1761*4882a593Smuzhiyun 	}
1762*4882a593Smuzhiyun 	return PCIBIOS_SUCCESSFUL;
1763*4882a593Smuzhiyun }
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun static int octeon_pcie0_write_config(struct pci_bus *bus, unsigned int devfn,
1766*4882a593Smuzhiyun 				     int reg, int size, u32 val)
1767*4882a593Smuzhiyun {
1768*4882a593Smuzhiyun 	return octeon_pcie_write_config(0, bus, devfn, reg, size, val);
1769*4882a593Smuzhiyun }
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun static int octeon_pcie1_write_config(struct pci_bus *bus, unsigned int devfn,
1772*4882a593Smuzhiyun 				     int reg, int size, u32 val)
1773*4882a593Smuzhiyun {
1774*4882a593Smuzhiyun 	return octeon_pcie_write_config(1, bus, devfn, reg, size, val);
1775*4882a593Smuzhiyun }
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
1778*4882a593Smuzhiyun 				     int reg, int size, u32 val)
1779*4882a593Smuzhiyun {
1780*4882a593Smuzhiyun 	return PCIBIOS_FUNC_NOT_SUPPORTED;
1781*4882a593Smuzhiyun }
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun static struct pci_ops octeon_pcie0_ops = {
1784*4882a593Smuzhiyun 	.read	= octeon_pcie0_read_config,
1785*4882a593Smuzhiyun 	.write	= octeon_pcie0_write_config,
1786*4882a593Smuzhiyun };
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun static struct resource octeon_pcie0_mem_resource = {
1789*4882a593Smuzhiyun 	.name = "Octeon PCIe0 MEM",
1790*4882a593Smuzhiyun 	.flags = IORESOURCE_MEM,
1791*4882a593Smuzhiyun };
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun static struct resource octeon_pcie0_io_resource = {
1794*4882a593Smuzhiyun 	.name = "Octeon PCIe0 IO",
1795*4882a593Smuzhiyun 	.flags = IORESOURCE_IO,
1796*4882a593Smuzhiyun };
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun static struct pci_controller octeon_pcie0_controller = {
1799*4882a593Smuzhiyun 	.pci_ops = &octeon_pcie0_ops,
1800*4882a593Smuzhiyun 	.mem_resource = &octeon_pcie0_mem_resource,
1801*4882a593Smuzhiyun 	.io_resource = &octeon_pcie0_io_resource,
1802*4882a593Smuzhiyun };
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun static struct pci_ops octeon_pcie1_ops = {
1805*4882a593Smuzhiyun 	.read	= octeon_pcie1_read_config,
1806*4882a593Smuzhiyun 	.write	= octeon_pcie1_write_config,
1807*4882a593Smuzhiyun };
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun static struct resource octeon_pcie1_mem_resource = {
1810*4882a593Smuzhiyun 	.name = "Octeon PCIe1 MEM",
1811*4882a593Smuzhiyun 	.flags = IORESOURCE_MEM,
1812*4882a593Smuzhiyun };
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun static struct resource octeon_pcie1_io_resource = {
1815*4882a593Smuzhiyun 	.name = "Octeon PCIe1 IO",
1816*4882a593Smuzhiyun 	.flags = IORESOURCE_IO,
1817*4882a593Smuzhiyun };
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun static struct pci_controller octeon_pcie1_controller = {
1820*4882a593Smuzhiyun 	.pci_ops = &octeon_pcie1_ops,
1821*4882a593Smuzhiyun 	.mem_resource = &octeon_pcie1_mem_resource,
1822*4882a593Smuzhiyun 	.io_resource = &octeon_pcie1_io_resource,
1823*4882a593Smuzhiyun };
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun static struct pci_ops octeon_dummy_ops = {
1826*4882a593Smuzhiyun 	.read	= octeon_dummy_read_config,
1827*4882a593Smuzhiyun 	.write	= octeon_dummy_write_config,
1828*4882a593Smuzhiyun };
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun static struct resource octeon_dummy_mem_resource = {
1831*4882a593Smuzhiyun 	.name = "Virtual PCIe MEM",
1832*4882a593Smuzhiyun 	.flags = IORESOURCE_MEM,
1833*4882a593Smuzhiyun };
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun static struct resource octeon_dummy_io_resource = {
1836*4882a593Smuzhiyun 	.name = "Virtual PCIe IO",
1837*4882a593Smuzhiyun 	.flags = IORESOURCE_IO,
1838*4882a593Smuzhiyun };
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun static struct pci_controller octeon_dummy_controller = {
1841*4882a593Smuzhiyun 	.pci_ops = &octeon_dummy_ops,
1842*4882a593Smuzhiyun 	.mem_resource = &octeon_dummy_mem_resource,
1843*4882a593Smuzhiyun 	.io_resource = &octeon_dummy_io_resource,
1844*4882a593Smuzhiyun };
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun static int device_needs_bus_num_war(uint32_t deviceid)
1847*4882a593Smuzhiyun {
1848*4882a593Smuzhiyun #define IDT_VENDOR_ID 0x111d
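	/*
	 * Editorial note: deviceid is config-space dword 0, with the
	 * vendor ID in the low 16 bits and the device ID in the high 16,
	 * so the mask below matches any IDT part regardless of device.
	 */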
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	if ((deviceid & 0xffff) == IDT_VENDOR_ID)
1851*4882a593Smuzhiyun 		return 1;
1852*4882a593Smuzhiyun 	return 0;
1853*4882a593Smuzhiyun }
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun /**
1856*4882a593Smuzhiyun  * Initialize the Octeon PCIe controllers
1857*4882a593Smuzhiyun  *
1858*4882a593Smuzhiyun  * Returns Zero on success.
1859*4882a593Smuzhiyun  */
1860*4882a593Smuzhiyun static int __init octeon_pcie_setup(void)
1861*4882a593Smuzhiyun {
1862*4882a593Smuzhiyun 	int result;
1863*4882a593Smuzhiyun 	int host_mode;
1864*4882a593Smuzhiyun 	int srio_war15205 = 0, port;
1865*4882a593Smuzhiyun 	union cvmx_sli_ctl_portx sli_ctl_portx;
1866*4882a593Smuzhiyun 	union cvmx_sriox_status_reg sriox_status_reg;
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 	/* These chips don't have PCIe */
1869*4882a593Smuzhiyun 	if (!octeon_has_feature(OCTEON_FEATURE_PCIE))
1870*4882a593Smuzhiyun 		return 0;
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	/* No PCIe simulation */
1873*4882a593Smuzhiyun 	if (octeon_is_simulation())
1874*4882a593Smuzhiyun 		return 0;
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	/* Disable PCI if instructed on the command line */
1877*4882a593Smuzhiyun 	if (pcie_disable)
1878*4882a593Smuzhiyun 		return 0;
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 	/* Point pcibios_map_irq() to the PCIe version of it */
1881*4882a593Smuzhiyun 	octeon_pcibios_map_irq = octeon_pcie_pcibios_map_irq;
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	/*
1884*4882a593Smuzhiyun 	 * PCIe I/O range. It is based on port 0 but extends up to
1885*4882a593Smuzhiyun 	 * port 1's end.
1886*4882a593Smuzhiyun 	 */
1887*4882a593Smuzhiyun 	set_io_port_base(CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0)));
1888*4882a593Smuzhiyun 	ioport_resource.start = 0;
1889*4882a593Smuzhiyun 	ioport_resource.end =
1890*4882a593Smuzhiyun 		cvmx_pcie_get_io_base_address(1) -
1891*4882a593Smuzhiyun 		cvmx_pcie_get_io_base_address(0) + cvmx_pcie_get_io_size(1) - 1;
1892*4882a593Smuzhiyun 
1893*4882a593Smuzhiyun 	/*
1894*4882a593Smuzhiyun 	 * Create a dummy PCIe controller to swallow up bus 0. IDT bridges
1895*4882a593Smuzhiyun 	 * don't work if the primary bus number is zero. Here we add a fake
1896*4882a593Smuzhiyun 	 * PCIe controller that the kernel will give bus 0. This allows
1897*4882a593Smuzhiyun 	 * us to leave the normal kernel bus enumeration unchanged.
1898*4882a593Smuzhiyun 	 */
1899*4882a593Smuzhiyun 	octeon_dummy_controller.io_map_base = -1;
1900*4882a593Smuzhiyun 	octeon_dummy_controller.mem_resource->start = (1ull<<48);
1901*4882a593Smuzhiyun 	octeon_dummy_controller.mem_resource->end = (1ull<<48);
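	/*
	 * Editorial note: the start == end resource at 1 << 48 is
	 * effectively empty and sits far outside any real decode window,
	 * so nothing can ever be allocated from the dummy controller.
	 */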
1902*4882a593Smuzhiyun 	register_pci_controller(&octeon_dummy_controller);
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
1905*4882a593Smuzhiyun 		union cvmx_npei_ctl_status npei_ctl_status;
1906*4882a593Smuzhiyun 		npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
1907*4882a593Smuzhiyun 		host_mode = npei_ctl_status.s.host_mode;
1908*4882a593Smuzhiyun 		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE;
1909*4882a593Smuzhiyun 	} else {
1910*4882a593Smuzhiyun 		union cvmx_mio_rst_ctlx mio_rst_ctl;
1911*4882a593Smuzhiyun 		mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(0));
1912*4882a593Smuzhiyun 		host_mode = mio_rst_ctl.s.host_mode;
1913*4882a593Smuzhiyun 		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_PCIE2;
1914*4882a593Smuzhiyun 	}
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	if (host_mode) {
1917*4882a593Smuzhiyun 		pr_notice("PCIe: Initializing port 0\n");
1918*4882a593Smuzhiyun 		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
1919*4882a593Smuzhiyun 		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
1920*4882a593Smuzhiyun 			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
1921*4882a593Smuzhiyun 			sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
1922*4882a593Smuzhiyun 			if (sriox_status_reg.s.srio) {
1923*4882a593Smuzhiyun 				srio_war15205 += 1;	 /* Port is SRIO */
1924*4882a593Smuzhiyun 				port = 0;
1925*4882a593Smuzhiyun 			}
1926*4882a593Smuzhiyun 		}
1927*4882a593Smuzhiyun 		result = cvmx_pcie_rc_initialize(0);
1928*4882a593Smuzhiyun 		if (result == 0) {
1929*4882a593Smuzhiyun 			uint32_t device0;
1930*4882a593Smuzhiyun 			/* Memory offsets are physical addresses */
1931*4882a593Smuzhiyun 			octeon_pcie0_controller.mem_offset =
1932*4882a593Smuzhiyun 				cvmx_pcie_get_mem_base_address(0);
1933*4882a593Smuzhiyun 			/* IO offsets are MIPS virtual addresses */
1934*4882a593Smuzhiyun 			octeon_pcie0_controller.io_map_base =
1935*4882a593Smuzhiyun 				CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address
1936*4882a593Smuzhiyun 						(0));
1937*4882a593Smuzhiyun 			octeon_pcie0_controller.io_offset = 0;
1938*4882a593Smuzhiyun 			/*
1939*4882a593Smuzhiyun 			 * To keep things similar to PCI, we start
1940*4882a593Smuzhiyun 			 * device addresses at the same place as PCI
1941*4882a593Smuzhiyun 			 * using big BAR support. This normally
1942*4882a593Smuzhiyun 			 * translates to 4GB-256MB, which is the same
1943*4882a593Smuzhiyun 			 * as most x86 PCs.
1944*4882a593Smuzhiyun 			 */
1945*4882a593Smuzhiyun 			octeon_pcie0_controller.mem_resource->start =
1946*4882a593Smuzhiyun 				cvmx_pcie_get_mem_base_address(0) +
1947*4882a593Smuzhiyun 				(4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20);
1948*4882a593Smuzhiyun 			octeon_pcie0_controller.mem_resource->end =
1949*4882a593Smuzhiyun 				cvmx_pcie_get_mem_base_address(0) +
1950*4882a593Smuzhiyun 				cvmx_pcie_get_mem_size(0) - 1;
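			/*
			 * Worked math (editorial, assuming the usual 256MB
			 * hole): (4ul << 30) - (256 << 20) puts the window
			 * start at base + 0xF0000000, i.e. 4GB - 256MB into
			 * the port's memory space.
			 */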
1951*4882a593Smuzhiyun 			/*
1952*4882a593Smuzhiyun 			 * Ports must be above 16KB for the ISA bus
1953*4882a593Smuzhiyun 			 * filtering in the PCI-X to PCI bridge.
1954*4882a593Smuzhiyun 			 */
1955*4882a593Smuzhiyun 			octeon_pcie0_controller.io_resource->start = 4 << 10;
1956*4882a593Smuzhiyun 			octeon_pcie0_controller.io_resource->end =
1957*4882a593Smuzhiyun 				cvmx_pcie_get_io_size(0) - 1;
1958*4882a593Smuzhiyun 			msleep(100); /* Some devices need extra time */
1959*4882a593Smuzhiyun 			register_pci_controller(&octeon_pcie0_controller);
1960*4882a593Smuzhiyun 			device0 = cvmx_pcie_config_read32(0, 0, 0, 0, 0);
1961*4882a593Smuzhiyun 			enable_pcie_bus_num_war[0] =
1962*4882a593Smuzhiyun 				device_needs_bus_num_war(device0);
1963*4882a593Smuzhiyun 		}
1964*4882a593Smuzhiyun 	} else {
1965*4882a593Smuzhiyun 		pr_notice("PCIe: Port 0 in endpoint mode, skipping.\n");
1966*4882a593Smuzhiyun 		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
1967*4882a593Smuzhiyun 		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
1968*4882a593Smuzhiyun 			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
1969*4882a593Smuzhiyun 			srio_war15205 += 1;
1970*4882a593Smuzhiyun 			port = 0;
1971*4882a593Smuzhiyun 		}
1972*4882a593Smuzhiyun 	}
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun 	if (octeon_has_feature(OCTEON_FEATURE_NPEI)) {
1975*4882a593Smuzhiyun 		host_mode = 1;
1976*4882a593Smuzhiyun 		/* Skip the 2nd port on CN52XX if port 0 is in 4 lane mode */
1977*4882a593Smuzhiyun 		if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
1978*4882a593Smuzhiyun 			union cvmx_npei_dbg_data dbg_data;
1979*4882a593Smuzhiyun 			dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
1980*4882a593Smuzhiyun 			if (dbg_data.cn52xx.qlm0_link_width)
1981*4882a593Smuzhiyun 				host_mode = 0;
1982*4882a593Smuzhiyun 		}
1983*4882a593Smuzhiyun 	} else {
1984*4882a593Smuzhiyun 		union cvmx_mio_rst_ctlx mio_rst_ctl;
1985*4882a593Smuzhiyun 		mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(1));
1986*4882a593Smuzhiyun 		host_mode = mio_rst_ctl.s.host_mode;
1987*4882a593Smuzhiyun 	}
1988*4882a593Smuzhiyun 
1989*4882a593Smuzhiyun 	if (host_mode) {
1990*4882a593Smuzhiyun 		pr_notice("PCIe: Initializing port 1\n");
1991*4882a593Smuzhiyun 		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
1992*4882a593Smuzhiyun 		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
1993*4882a593Smuzhiyun 			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
1994*4882a593Smuzhiyun 			sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
1995*4882a593Smuzhiyun 			if (sriox_status_reg.s.srio) {
1996*4882a593Smuzhiyun 				srio_war15205 += 1;	 /* Port is SRIO */
1997*4882a593Smuzhiyun 				port = 1;
1998*4882a593Smuzhiyun 			}
1999*4882a593Smuzhiyun 		}
2000*4882a593Smuzhiyun 		result = cvmx_pcie_rc_initialize(1);
2001*4882a593Smuzhiyun 		if (result == 0) {
2002*4882a593Smuzhiyun 			uint32_t device0;
2003*4882a593Smuzhiyun 			/* Memory offsets are physical addresses */
2004*4882a593Smuzhiyun 			octeon_pcie1_controller.mem_offset =
2005*4882a593Smuzhiyun 				cvmx_pcie_get_mem_base_address(1);
2006*4882a593Smuzhiyun 			/*
2007*4882a593Smuzhiyun 			 * To calculate the address for accessing the 2nd PCIe device,
2008*4882a593Smuzhiyun 			 * either the 'io_map_base' (pci_iomap()) or the 'mips_io_port_base'
2009*4882a593Smuzhiyun 			 * (ioport_map()) value is added to
2010*4882a593Smuzhiyun 			 * pci_resource_start(dev, bar). The 'mips_io_port_base' is set
2011*4882a593Smuzhiyun 			 * only once, based on the first PCIe port. We also set 'io_map_base'
2012*4882a593Smuzhiyun 			 * from the first port's value so that both routines
2013*4882a593Smuzhiyun 			 * work properly.
2014*4882a593Smuzhiyun 			 */
2015*4882a593Smuzhiyun 			octeon_pcie1_controller.io_map_base =
2016*4882a593Smuzhiyun 				CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(0));
2017*4882a593Smuzhiyun 			/* IO offsets are MIPS virtual addresses */
2018*4882a593Smuzhiyun 			octeon_pcie1_controller.io_offset =
2019*4882a593Smuzhiyun 				cvmx_pcie_get_io_base_address(1) -
2020*4882a593Smuzhiyun 				cvmx_pcie_get_io_base_address(0);
2021*4882a593Smuzhiyun 			/*
2022*4882a593Smuzhiyun 			 * To keep things similar to PCI, we start device
2023*4882a593Smuzhiyun 			 * addresses at the same place as PCI using big BAR
2024*4882a593Smuzhiyun 			 * support. This normally translates to 4GB-256MB,
2025*4882a593Smuzhiyun 			 * which is the same as most x86 PCs.
2026*4882a593Smuzhiyun 			 */
2027*4882a593Smuzhiyun 			octeon_pcie1_controller.mem_resource->start =
2028*4882a593Smuzhiyun 				cvmx_pcie_get_mem_base_address(1) + (4ul << 30) -
2029*4882a593Smuzhiyun 				(OCTEON_PCI_BAR1_HOLE_SIZE << 20);
2030*4882a593Smuzhiyun 			octeon_pcie1_controller.mem_resource->end =
2031*4882a593Smuzhiyun 				cvmx_pcie_get_mem_base_address(1) +
2032*4882a593Smuzhiyun 				cvmx_pcie_get_mem_size(1) - 1;
2033*4882a593Smuzhiyun 			/*
2034*4882a593Smuzhiyun 			 * Ports must be above 16KB for the ISA bus filtering
2035*4882a593Smuzhiyun 			 * in the PCI-X to PCI bridge.
2036*4882a593Smuzhiyun 			 */
2037*4882a593Smuzhiyun 			octeon_pcie1_controller.io_resource->start =
2038*4882a593Smuzhiyun 				cvmx_pcie_get_io_base_address(1) -
2039*4882a593Smuzhiyun 				cvmx_pcie_get_io_base_address(0);
2040*4882a593Smuzhiyun 			octeon_pcie1_controller.io_resource->end =
2041*4882a593Smuzhiyun 				octeon_pcie1_controller.io_resource->start +
2042*4882a593Smuzhiyun 				cvmx_pcie_get_io_size(1) - 1;
2043*4882a593Smuzhiyun 			msleep(100); /* Some devices need extra time */
2044*4882a593Smuzhiyun 			register_pci_controller(&octeon_pcie1_controller);
2045*4882a593Smuzhiyun 			device0 = cvmx_pcie_config_read32(1, 0, 0, 0, 0);
2046*4882a593Smuzhiyun 			enable_pcie_bus_num_war[1] =
2047*4882a593Smuzhiyun 				device_needs_bus_num_war(device0);
2048*4882a593Smuzhiyun 		}
2049*4882a593Smuzhiyun 	} else {
2050*4882a593Smuzhiyun 		pr_notice("PCIe: Port 1 not in root complex mode, skipping.\n");
2051*4882a593Smuzhiyun 		/* CN63XX pass 1_x/2.0 errata PCIe-15205 */
2052*4882a593Smuzhiyun 		if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
2053*4882a593Smuzhiyun 			OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
2054*4882a593Smuzhiyun 			srio_war15205 += 1;
2055*4882a593Smuzhiyun 			port = 1;
2056*4882a593Smuzhiyun 		}
2057*4882a593Smuzhiyun 	}
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 	/*
2060*4882a593Smuzhiyun 	 * CN63XX pass 1_x/2.0 errata PCIe-15205 requires setting all
2061*4882a593Smuzhiyun 	 * of the SRIO MACs' SLI_CTL_PORT*[INT*_MAP] fields to the same
2062*4882a593Smuzhiyun 	 * value, and all of the PCIe MACs' SLI_CTL_PORT*[INT*_MAP]
2063*4882a593Smuzhiyun 	 * fields to a different value from the one previously set.
2064*4882a593Smuzhiyun 	 */
2065*4882a593Smuzhiyun 	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) ||
2066*4882a593Smuzhiyun 		OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
2067*4882a593Smuzhiyun 		if (srio_war15205 == 1) {
2068*4882a593Smuzhiyun 			sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(port));
2069*4882a593Smuzhiyun 			sli_ctl_portx.s.inta_map = 1;
2070*4882a593Smuzhiyun 			sli_ctl_portx.s.intb_map = 1;
2071*4882a593Smuzhiyun 			sli_ctl_portx.s.intc_map = 1;
2072*4882a593Smuzhiyun 			sli_ctl_portx.s.intd_map = 1;
2073*4882a593Smuzhiyun 			cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(port), sli_ctl_portx.u64);
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun 			sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(!port));
2076*4882a593Smuzhiyun 			sli_ctl_portx.s.inta_map = 0;
2077*4882a593Smuzhiyun 			sli_ctl_portx.s.intb_map = 0;
2078*4882a593Smuzhiyun 			sli_ctl_portx.s.intc_map = 0;
2079*4882a593Smuzhiyun 			sli_ctl_portx.s.intd_map = 0;
2080*4882a593Smuzhiyun 			cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(!port), sli_ctl_portx.u64);
2081*4882a593Smuzhiyun 		}
2082*4882a593Smuzhiyun 	}
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 	octeon_pci_dma_init();
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	return 0;
2087*4882a593Smuzhiyun }
2088*4882a593Smuzhiyun arch_initcall(octeon_pcie_setup);