1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * MPC83xx/85xx/86xx PCI/PCIE support routing.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright 2007-2012 Freescale Semiconductor, Inc.
6*4882a593Smuzhiyun * Copyright 2008-2009 MontaVista Software, Inc.
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * Initial author: Xianghua Xiao <x.xiao@freescale.com>
9*4882a593Smuzhiyun * Recode: ZHANG WEI <wei.zhang@freescale.com>
 * Rewrite the routing for Freescale PCI and PCI Express
11*4882a593Smuzhiyun * Roy Zang <tie-fei.zang@freescale.com>
12*4882a593Smuzhiyun * MPC83xx PCI-Express support:
13*4882a593Smuzhiyun * Tony Li <tony.li@freescale.com>
14*4882a593Smuzhiyun * Anton Vorontsov <avorontsov@ru.mvista.com>
15*4882a593Smuzhiyun */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/fsl/edac.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #include <asm/io.h>
32*4882a593Smuzhiyun #include <asm/prom.h>
33*4882a593Smuzhiyun #include <asm/pci-bridge.h>
34*4882a593Smuzhiyun #include <asm/ppc-pci.h>
35*4882a593Smuzhiyun #include <asm/machdep.h>
36*4882a593Smuzhiyun #include <asm/mpc85xx.h>
37*4882a593Smuzhiyun #include <asm/disassemble.h>
38*4882a593Smuzhiyun #include <asm/ppc-opcode.h>
39*4882a593Smuzhiyun #include <asm/swiotlb.h>
40*4882a593Smuzhiyun #include <sysdev/fsl_soc.h>
41*4882a593Smuzhiyun #include <sysdev/fsl_pci.h>
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
44*4882a593Smuzhiyun
quirk_fsl_pcie_early(struct pci_dev * dev)45*4882a593Smuzhiyun static void quirk_fsl_pcie_early(struct pci_dev *dev)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun u8 hdr_type;
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun /* if we aren't a PCIe don't bother */
50*4882a593Smuzhiyun if (!pci_is_pcie(dev))
51*4882a593Smuzhiyun return;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun /* if we aren't in host mode don't bother */
54*4882a593Smuzhiyun pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
55*4882a593Smuzhiyun if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
56*4882a593Smuzhiyun return;
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun dev->class = PCI_CLASS_BRIDGE_PCI << 8;
59*4882a593Smuzhiyun fsl_pcie_bus_fixup = 1;
60*4882a593Smuzhiyun return;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun
/* Forward declaration: fsl_pcie_check_link() compares hose->ops->read
 * against this function to pick the right config-space accessor. */
static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
				    int, int, u32 *);
65*4882a593Smuzhiyun
fsl_pcie_check_link(struct pci_controller * hose)66*4882a593Smuzhiyun static int fsl_pcie_check_link(struct pci_controller *hose)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun u32 val = 0;
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
71*4882a593Smuzhiyun if (hose->ops->read == fsl_indirect_read_config)
72*4882a593Smuzhiyun __indirect_read_config(hose, hose->first_busno, 0,
73*4882a593Smuzhiyun PCIE_LTSSM, 4, &val);
74*4882a593Smuzhiyun else
75*4882a593Smuzhiyun early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
76*4882a593Smuzhiyun if (val < PCIE_LTSSM_L0)
77*4882a593Smuzhiyun return 1;
78*4882a593Smuzhiyun } else {
79*4882a593Smuzhiyun struct ccsr_pci __iomem *pci = hose->private_data;
80*4882a593Smuzhiyun /* for PCIe IP rev 3.0 or greater use CSR0 for link state */
81*4882a593Smuzhiyun val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
82*4882a593Smuzhiyun >> PEX_CSR0_LTSSM_SHIFT;
83*4882a593Smuzhiyun if (val != PEX_CSR0_LTSSM_L0)
84*4882a593Smuzhiyun return 1;
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun return 0;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun
/*
 * Config-space read wrapper: refresh the link-state flag before every
 * access so reads behind a dead link are short-circuited by the
 * generic indirect accessor instead of hanging the bus.
 */
static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
				    int offset, int len, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	int link_down = fsl_pcie_check_link(hose);

	hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
	if (link_down)
		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;

	return indirect_read_config(bus, devfn, offset, len, val);
}
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
104*4882a593Smuzhiyun
/*
 * PCIe config accessors: reads go through fsl_indirect_read_config()
 * so the link state is re-checked on every access; writes use the
 * stock indirect path.
 */
static struct pci_ops fsl_indirect_pcie_ops =
{
	.read = fsl_indirect_read_config,
	.write = indirect_write_config,
};
110*4882a593Smuzhiyun
/* PCI base address of the 64-bit inbound window set up by
 * setup_pci_atmu(); also used as the per-device DMA offset. */
static u64 pci64_dma_offset;
112*4882a593Smuzhiyun
#ifdef CONFIG_SWIOTLB
/*
 * Cap the device's directly-addressable DMA range at the end of the
 * controller's inbound window; mappings above the limit will bounce
 * through SWIOTLB.
 */
static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);

	pdev->dev.bus_dma_limit =
		hose->dma_window_base_cur + hose->dma_window_size - 1;
}

/* Install the per-device DMA-limit hook when SWIOTLB was enabled
 * (see the dma_window_size check at the end of setup_pci_atmu()). */
static void setup_swiotlb_ops(struct pci_controller *hose)
{
	if (ppc_swiotlb_enable)
		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
}
#else
static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
#endif
130*4882a593Smuzhiyun
/*
 * dma_set_mask hook installed when a 64-bit inbound window exists.
 *
 * Fix up PCI devices that are able to DMA to the large inbound
 * mapping that allows addressing any RAM address from across PCI:
 * such devices get no bus limit and the 64-bit window's offset.
 */
static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev_is_pci(dev))
		return;
	if (dma_mask < pci64_dma_offset * 2 - 1)
		return;

	dev->bus_dma_limit = 0;
	dev->archdata.dma_offset = pci64_dma_offset;
}
142*4882a593Smuzhiyun
/*
 * Program one or more outbound ATMU windows to cover a MEM resource.
 *
 * Each hardware window must be a naturally-aligned power of two, so the
 * resource is carved into the largest chunks permitted by both the
 * remaining size and the alignment of the current PCI/physical address.
 *
 * Returns the number of windows consumed, or -1 if window @index + n
 * would exceed the 5 available outbound windows.
 */
static int setup_one_atmu(struct ccsr_pci __iomem *pci,
				unsigned int index, const struct resource *res,
				resource_size_t offset)
{
	resource_size_t pci_addr = res->start - offset;
	resource_size_t phys_addr = res->start;
	resource_size_t remaining = resource_size(res);
	u32 flags = 0x80044000; /* enable & mem R/W */
	unsigned int nr_wins = 0;

	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
		 (u64)res->start, (u64)remaining);

	if (res->flags & IORESOURCE_PREFETCH)
		flags |= 0x10000000; /* enable relaxed ordering */

	while (remaining > 0) {
		resource_size_t chunk;
		unsigned int bits;

		if (index + nr_wins >= 5)
			return -1;

		/* Largest power-of-two chunk honoring size and alignment. */
		bits = min_t(u32, ilog2(remaining),
			     __ffs(pci_addr | phys_addr));
		chunk = (resource_size_t)1U << bits;

		out_be32(&pci->pow[index + nr_wins].potar, pci_addr >> 12);
		out_be32(&pci->pow[index + nr_wins].potear, (u64)pci_addr >> 44);
		out_be32(&pci->pow[index + nr_wins].powbar, phys_addr >> 12);
		out_be32(&pci->pow[index + nr_wins].powar, flags | (bits - 1));

		pci_addr += chunk;
		phys_addr += chunk;
		remaining -= chunk;
		nr_wins++;
	}

	return nr_wins;
}
178*4882a593Smuzhiyun
is_kdump(void)179*4882a593Smuzhiyun static bool is_kdump(void)
180*4882a593Smuzhiyun {
181*4882a593Smuzhiyun struct device_node *node;
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun node = of_find_node_by_type(NULL, "memory");
184*4882a593Smuzhiyun if (!node) {
185*4882a593Smuzhiyun WARN_ON_ONCE(1);
186*4882a593Smuzhiyun return false;
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun return of_property_read_bool(node, "linux,usable-memory");
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun /* atmu setup for fsl pci/pcie controller */
/*
 * atmu setup for fsl pci/pcie controller
 *
 * Programs the controller's Address Translation & Mapping Units:
 *  - outbound windows covering the hose's MEM and IO resources,
 *  - the PCSRBAR/PEXCSRBAR inbound BAR that maps CCSR,
 *  - inbound windows covering system DRAM for device DMA.
 * Records the resulting DMA window in hose->dma_window_base_cur /
 * hose->dma_window_size, and enables SWIOTLB when DRAM is not fully
 * covered by the inbound windows.
 */
static void setup_pci_atmu(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
	u64 mem, sz, paddr_hi = 0;
	u64 offset = 0, paddr_lo = ULLONG_MAX;
	u32 pcicsrbar = 0, pcicsrbar_sz;
	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
	const u64 *reg;
	int len;
	bool setup_inbound;

	/*
	 * If this is kdump, we don't want to trigger a bunch of PCI
	 * errors by closing the window on in-flight DMA.
	 *
	 * We still run most of the function's logic so that things like
	 * hose->dma_window_size still get set.
	 */
	setup_inbound = !is_kdump();

	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
		/*
		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
		 * windows have implemented the default target value as 0xf
		 * for CCSR space. In all Freescale legacy devices the target
		 * of 0xf is reserved for local memory space. 9132 Rev1.0
		 * now has local memory space mapped to target 0x0 instead of
		 * 0xf. Hence adding a workaround to remove the target 0xf
		 * defined for memory space from Inbound window attributes.
		 */
		piwar &= ~PIWAR_TGI_LOCAL;
	}

	/* PCIe IP rev 2.2+ uses a different set of inbound window indices. */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
			win_idx = 2;
			start_idx = 0;
			end_idx = 3;
		}
	}

	/* Disable all windows (except powar0 since it's ignored) */
	for(i = 1; i < 5; i++)
		out_be32(&pci->pow[i].powar, 0);

	if (setup_inbound) {
		for (i = start_idx; i < end_idx; i++)
			out_be32(&pci->piw[i].piwar, 0);
	}

	/* Setup outbound MEM window */
	for(i = 0, j = 1; i < 3; i++) {
		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
			continue;

		/* Track the span of all MEM resources in CPU address space. */
		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);

		/* We assume all memory resources have the same offset */
		offset = hose->mem_offset[i];
		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);

		if (n < 0 || j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
		} else
			j += n;
	}

	/* Setup outbound IO window */
	if (hose->io_resource.flags & IORESOURCE_IO) {
		if (j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
		} else {
			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
				 "phy base 0x%016llx.\n",
				(u64)hose->io_resource.start,
				(u64)resource_size(&hose->io_resource),
				(u64)hose->io_base_phys);
			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
			out_be32(&pci->pow[j].potear, 0);
			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
			/* Enable, IO R/W */
			out_be32(&pci->pow[j].powar, 0x80088000
				| (ilog2(hose->io_resource.end
				- hose->io_resource.start + 1) - 1));
		}
	}

	/* convert to pci address space */
	paddr_hi -= offset;
	paddr_lo -= offset;

	if (paddr_hi == paddr_lo) {
		pr_err("%pOF: No outbound window space\n", hose->dn);
		return;
	}

	if (paddr_lo == 0) {
		pr_err("%pOF: No space for inbound window\n", hose->dn);
		return;
	}

	/* setup PCSRBAR/PEXCSRBAR */
	/* Standard BAR sizing: write all-ones, read back, invert + 1. */
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
	pcicsrbar_sz = ~pcicsrbar_sz + 1;

	/* Place the CSR BAR just below 4GiB, or just below the lowest
	 * outbound window when the windows straddle the 4GiB boundary. */
	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
		(paddr_lo > 0x100000000ull))
		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
	else
		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);

	paddr_lo = min(paddr_lo, (u64)pcicsrbar);

	pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);

	/* Setup inbound mem window */
	mem = memblock_end_of_DRAM();
	pr_info("%s: end of DRAM %llx\n", __func__, mem);

	/*
	 * The msi-address-64 property, if it exists, indicates the physical
	 * address of the MSIIR register. Normally, this register is located
	 * inside CCSR, so the ATMU that covers all of CCSR is used. But if
	 * this property exists, then we normally need to create a new ATMU
	 * for it. For now, however, we cheat. The only entity that creates
	 * this property is the Freescale hypervisor, and the address is
	 * specified in the partition configuration. Typically, the address
	 * is located in the page immediately after the end of DDR. If so, we
	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
	 * page.
	 */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64))) {
		u64 address = be64_to_cpup(reg);

		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
			pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
			mem += PAGE_SIZE;
		} else {
			/* TODO: Create a new ATMU for MSIIR */
			pr_warn("%pOF: msi-address-64 address of %llx is "
				"unsupported\n", hose->dn, address);
		}
	}

	/* Window cannot exceed the lowest outbound/CSR address. */
	sz = min(mem, paddr_lo);
	mem_log = ilog2(sz);

	/* PCIe can overmap inbound & outbound since RX & TX are separated */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* Size window to exact size if power-of-two or one size up */
		if ((1ull << mem_log) != mem) {
			mem_log++;
			if ((1ull << mem_log) > mem)
				pr_info("%pOF: Setting PCI inbound window "
					"greater than memory size\n", hose->dn);
		}

		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);

		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar, 0x00000000);
			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
			out_be32(&pci->piw[win_idx].piwar, piwar);
		}

		win_idx--;
		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)sz;

		/*
		 * if we have >4G of memory setup second PCI inbound window to
		 * let devices that are 64-bit address capable to work w/o
		 * SWIOTLB and access the full range of memory
		 */
		if (sz != mem) {
			mem_log = ilog2(mem);

			/* Size window up if we dont fit in exact power-of-2 */
			if ((1ull << mem_log) != mem)
				mem_log++;

			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
			pci64_dma_offset = 1ULL << mem_log;

			if (setup_inbound) {
				/* Setup inbound memory window */
				out_be32(&pci->piw[win_idx].pitar, 0x00000000);
				out_be32(&pci->piw[win_idx].piwbear,
						pci64_dma_offset >> 44);
				out_be32(&pci->piw[win_idx].piwbar,
						pci64_dma_offset >> 12);
				out_be32(&pci->piw[win_idx].piwar, piwar);
			}

			/*
			 * install our own dma_set_mask handler to fixup dma_ops
			 * and dma_offset
			 */
			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;

			pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
		}
	} else {
		u64 paddr = 0;

		/* Legacy PCI cannot overmap: carve DRAM into up to two
		 * power-of-two inbound windows. */
		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar, paddr >> 12);
			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
			out_be32(&pci->piw[win_idx].piwar,
				 (piwar | (mem_log - 1)));
		}

		win_idx--;
		paddr += 1ull << mem_log;
		sz -= 1ull << mem_log;

		if (sz) {
			mem_log = ilog2(sz);
			piwar |= (mem_log - 1);

			if (setup_inbound) {
				out_be32(&pci->piw[win_idx].pitar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwbar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwar, piwar);
			}

			win_idx--;
			paddr += 1ull << mem_log;
		}

		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)paddr;
	}

	if (hose->dma_window_size < mem) {
#ifdef CONFIG_SWIOTLB
		ppc_swiotlb_enable = 1;
#else
		pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
			hose->dn);
#endif
		/* adjusting outbound windows could reclaim space in mem map */
		if (paddr_hi < 0xffffffffull)
			pr_warn("%pOF: WARNING: Outbound window cfg leaves "
				"gaps in memory map. Adjusting the memory map "
				"could reduce unnecessary bounce buffering.\n",
				hose->dn);

		pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
			(u64)hose->dma_window_size);
	}
}
457*4882a593Smuzhiyun
setup_pci_cmd(struct pci_controller * hose)458*4882a593Smuzhiyun static void __init setup_pci_cmd(struct pci_controller *hose)
459*4882a593Smuzhiyun {
460*4882a593Smuzhiyun u16 cmd;
461*4882a593Smuzhiyun int cap_x;
462*4882a593Smuzhiyun
463*4882a593Smuzhiyun early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
464*4882a593Smuzhiyun cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
465*4882a593Smuzhiyun | PCI_COMMAND_IO;
466*4882a593Smuzhiyun early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
469*4882a593Smuzhiyun if (cap_x) {
470*4882a593Smuzhiyun int pci_x_cmd = cap_x + PCI_X_CMD;
471*4882a593Smuzhiyun cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
472*4882a593Smuzhiyun | PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
473*4882a593Smuzhiyun early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
474*4882a593Smuzhiyun } else {
475*4882a593Smuzhiyun early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
476*4882a593Smuzhiyun }
477*4882a593Smuzhiyun }
478*4882a593Smuzhiyun
fsl_pcibios_fixup_bus(struct pci_bus * bus)479*4882a593Smuzhiyun void fsl_pcibios_fixup_bus(struct pci_bus *bus)
480*4882a593Smuzhiyun {
481*4882a593Smuzhiyun struct pci_controller *hose = pci_bus_to_host(bus);
482*4882a593Smuzhiyun int i, is_pcie = 0, no_link;
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun /* The root complex bridge comes up with bogus resources,
485*4882a593Smuzhiyun * we copy the PHB ones in.
486*4882a593Smuzhiyun *
487*4882a593Smuzhiyun * With the current generic PCI code, the PHB bus no longer
488*4882a593Smuzhiyun * has bus->resource[0..4] set, so things are a bit more
489*4882a593Smuzhiyun * tricky.
490*4882a593Smuzhiyun */
491*4882a593Smuzhiyun
492*4882a593Smuzhiyun if (fsl_pcie_bus_fixup)
493*4882a593Smuzhiyun is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
494*4882a593Smuzhiyun no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
495*4882a593Smuzhiyun
496*4882a593Smuzhiyun if (bus->parent == hose->bus && (is_pcie || no_link)) {
497*4882a593Smuzhiyun for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
498*4882a593Smuzhiyun struct resource *res = bus->resource[i];
499*4882a593Smuzhiyun struct resource *par;
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun if (!res)
502*4882a593Smuzhiyun continue;
503*4882a593Smuzhiyun if (i == 0)
504*4882a593Smuzhiyun par = &hose->io_resource;
505*4882a593Smuzhiyun else if (i < 4)
506*4882a593Smuzhiyun par = &hose->mem_resources[i-1];
507*4882a593Smuzhiyun else par = NULL;
508*4882a593Smuzhiyun
509*4882a593Smuzhiyun res->start = par ? par->start : 0;
510*4882a593Smuzhiyun res->end = par ? par->end : 0;
511*4882a593Smuzhiyun res->flags = par ? par->flags : 0;
512*4882a593Smuzhiyun }
513*4882a593Smuzhiyun }
514*4882a593Smuzhiyun }
515*4882a593Smuzhiyun
/*
 * Probe and register one FSL PCI/PCIe host bridge described by @pdev's
 * device-tree node.
 *
 * Maps the controller registers, selects the config-space accessors,
 * bails out if the controller is in agent/endpoint mode, applies
 * erratum workarounds, parses "ranges", and programs the ATMU windows.
 *
 * Returns 0 on success, -ENODEV/-ENOMEM on failure.
 */
int fsl_add_bridge(struct platform_device *pdev, int is_primary)
{
	int len;
	struct pci_controller *hose;
	struct resource rsrc;
	const int *bus_range;
	u8 hdr_type, progif;
	u32 class_code;
	struct device_node *dev;
	struct ccsr_pci __iomem *pci;
	u16 temp;
	u32 svr = mfspr(SPRN_SVR);

	dev = pdev->dev.of_node;

	if (!of_device_is_available(dev)) {
		pr_warn("%pOF: disabled\n", dev);
		return -ENODEV;
	}

	pr_debug("Adding PCI host bridge %pOF\n", dev);

	/* Fetch host bridge registers address */
	if (of_address_to_resource(dev, 0, &rsrc)) {
		printk(KERN_WARNING "Can't get pci register base!");
		return -ENOMEM;
	}

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int))
		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
			" bus 0\n", dev);

	pci_add_flags(PCI_REASSIGN_ALL_BUS);
	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;

	/* set platform device as the parent */
	hose->parent = &pdev->dev;
	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
		 (u64)rsrc.start, (u64)resource_size(&rsrc));

	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
	if (!hose->private_data)
		goto no_bridge;

	/* cfg_addr/cfg_data live at the start of the register block. */
	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
			   PPC_INDIRECT_TYPE_BIG_ENDIAN);

	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;

	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* use fsl_indirect_read_config for PCIe */
		hose->ops = &fsl_indirect_pcie_ops;
		/* For PCIE read HEADER_TYPE to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
			goto no_bridge;

	} else {
		/* For PCI read PROG to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
		if ((progif & 1) &&
		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
			goto no_bridge;
	}

	setup_pci_cmd(hose);

	/* check PCI express link status */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
		if (fsl_pcie_check_link(hose))
			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
		/* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */
		if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) {
			early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code);
			class_code &= 0xff;
			class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
			early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code);
		}
	} else {
		/*
		 * Set PBFR(PCI Bus Function Register)[10] = 1 to
		 * disable the combining of crossing cacheline
		 * boundary requests into one burst transaction.
		 * PCI-X operation is not affected.
		 * Fix erratum PCI 5 on MPC8548
		 */
#define PCI_BUS_FUNCTION 0x44
#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
		if (((SVR_SOC_VER(svr) == SVR_8543) ||
		     (SVR_SOC_VER(svr) == SVR_8545) ||
		     (SVR_SOC_VER(svr) == SVR_8547) ||
		     (SVR_SOC_VER(svr) == SVR_8548)) &&
		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
			early_read_config_word(hose, 0, 0,
					PCI_BUS_FUNCTION, &temp);
			temp |= PCI_BUS_FUNCTION_MDS;
			early_write_config_word(hose, 0, 0,
					PCI_BUS_FUNCTION, temp);
		}
	}

	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
		"Firmware bus number: %d->%d\n",
		(unsigned long long)rsrc.start, hose->first_busno,
		hose->last_busno);

	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
		hose, hose->cfg_addr, hose->cfg_data);

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(hose, dev, is_primary);

	/* Setup PEX window registers */
	setup_pci_atmu(hose);

	/* Set up controller operations */
	setup_swiotlb_ops(hose);

	return 0;

no_bridge:
	iounmap(hose->private_data);
	/* unmap cfg_data & cfg_addr separately if not on same page */
	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
		iounmap(hose->cfg_data);
	iounmap(hose->cfg_addr);
	pcibios_free_controller(hose);
	return -ENODEV;
}
657*4882a593Smuzhiyun #endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
658*4882a593Smuzhiyun
659*4882a593Smuzhiyun DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
660*4882a593Smuzhiyun quirk_fsl_pcie_early);
661*4882a593Smuzhiyun
662*4882a593Smuzhiyun #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
663*4882a593Smuzhiyun struct mpc83xx_pcie_priv {
664*4882a593Smuzhiyun void __iomem *cfg_type0;
665*4882a593Smuzhiyun void __iomem *cfg_type1;
666*4882a593Smuzhiyun u32 dev_base;
667*4882a593Smuzhiyun };
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun struct pex_inbound_window {
670*4882a593Smuzhiyun u32 ar;
671*4882a593Smuzhiyun u32 tar;
672*4882a593Smuzhiyun u32 barl;
673*4882a593Smuzhiyun u32 barh;
674*4882a593Smuzhiyun };
675*4882a593Smuzhiyun
676*4882a593Smuzhiyun /*
677*4882a593Smuzhiyun * With the convention of u-boot, the PCIE outbound window 0 serves
678*4882a593Smuzhiyun * as configuration transactions outbound.
679*4882a593Smuzhiyun */
680*4882a593Smuzhiyun #define PEX_OUTWIN0_BAR 0xCA4
681*4882a593Smuzhiyun #define PEX_OUTWIN0_TAL 0xCA8
682*4882a593Smuzhiyun #define PEX_OUTWIN0_TAH 0xCAC
683*4882a593Smuzhiyun #define PEX_RC_INWIN_BASE 0xE60
684*4882a593Smuzhiyun #define PEX_RCIWARn_EN 0x1
685*4882a593Smuzhiyun
mpc83xx_pcie_exclude_device(struct pci_bus * bus,unsigned int devfn)686*4882a593Smuzhiyun static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
687*4882a593Smuzhiyun {
688*4882a593Smuzhiyun struct pci_controller *hose = pci_bus_to_host(bus);
689*4882a593Smuzhiyun
690*4882a593Smuzhiyun if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
691*4882a593Smuzhiyun return PCIBIOS_DEVICE_NOT_FOUND;
692*4882a593Smuzhiyun /*
693*4882a593Smuzhiyun * Workaround for the HW bug: for Type 0 configure transactions the
694*4882a593Smuzhiyun * PCI-E controller does not check the device number bits and just
695*4882a593Smuzhiyun * assumes that the device number bits are 0.
696*4882a593Smuzhiyun */
697*4882a593Smuzhiyun if (bus->number == hose->first_busno ||
698*4882a593Smuzhiyun bus->primary == hose->first_busno) {
699*4882a593Smuzhiyun if (devfn & 0xf8)
700*4882a593Smuzhiyun return PCIBIOS_DEVICE_NOT_FOUND;
701*4882a593Smuzhiyun }
702*4882a593Smuzhiyun
703*4882a593Smuzhiyun if (ppc_md.pci_exclude_device) {
704*4882a593Smuzhiyun if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
705*4882a593Smuzhiyun return PCIBIOS_DEVICE_NOT_FOUND;
706*4882a593Smuzhiyun }
707*4882a593Smuzhiyun
708*4882a593Smuzhiyun return PCIBIOS_SUCCESSFUL;
709*4882a593Smuzhiyun }
710*4882a593Smuzhiyun
/*
 * Translate (bus, devfn, offset) into a virtual config-space address.
 *
 * Root-bus accesses go straight through the Type 0 window.  Downstream
 * accesses are steered by reprogramming outbound window 0's translation
 * address to select the target bus/devfn; the last-programmed target is
 * cached in pcie->dev_base so back-to-back accesses to the same function
 * skip the register write.
 *
 * Returns NULL when the target is excluded (no link, unreachable device,
 * or platform veto), which makes the generic accessors fail the access.
 */
static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
					    unsigned int devfn, int offset)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
	u32 dev_base = bus->number << 24 | devfn << 16;
	int ret;

	ret = mpc83xx_pcie_exclude_device(bus, devfn);
	if (ret)
		return NULL;

	/* Each function's config space is a 4K region within the window. */
	offset &= 0xfff;

	/* Type 0 */
	if (bus->number == hose->first_busno)
		return pcie->cfg_type0 + offset;

	if (pcie->dev_base == dev_base)
		goto mapped;

	/* Retarget outbound window 0 at the requested bus/devfn. */
	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);

	pcie->dev_base = dev_base;
mapped:
	return pcie->cfg_type1 + offset;
}
738*4882a593Smuzhiyun
/*
 * Config-space write accessor for the MPC83xx PCIe root complex.
 *
 * Implements PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS by hand: the
 * primary-bus-number byte of the root bridge must never be rewritten,
 * so it is masked out before delegating to the generic writer.
 */
static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				     int offset, int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (bus->number == hose->first_busno && offset == PCI_PRIMARY_BUS)
		val &= ~0xffu;

	return pci_generic_config_write(bus, devfn, offset, len, val);
}
750*4882a593Smuzhiyun
751*4882a593Smuzhiyun static struct pci_ops mpc83xx_pcie_ops = {
752*4882a593Smuzhiyun .map_bus = mpc83xx_pcie_remap_cfg,
753*4882a593Smuzhiyun .read = pci_generic_config_read,
754*4882a593Smuzhiyun .write = mpc83xx_pcie_write_config,
755*4882a593Smuzhiyun };
756*4882a593Smuzhiyun
/*
 * Map the MPC83xx PCIe controller's config windows and install pci_ops.
 *
 * The Type 0 window (controller CSR space) comes from @reg.  The Type 1
 * window address is read back from outbound window 0's BAR, which
 * firmware (u-boot, per the convention noted above) is expected to have
 * programmed already.
 *
 * Returns 0 on success, -ENODEV if firmware left the controller
 * unconfigured, -ENOMEM on allocation or mapping failure.
 */
static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
				     struct resource *reg)
{
	struct mpc83xx_pcie_priv *pcie;
	u32 cfg_bar;
	int ret = -ENOMEM;

	pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return ret;

	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
	if (!pcie->cfg_type0)
		goto err0;

	/* Firmware must have pointed outbound window 0 somewhere. */
	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
	if (!cfg_bar) {
		/* PCI-E isn't configured. */
		ret = -ENODEV;
		goto err1;
	}

	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
	if (!pcie->cfg_type1)
		goto err1;

	WARN_ON(hose->dn->data);
	hose->dn->data = pcie;
	hose->ops = &mpc83xx_pcie_ops;
	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;

	/* Start with the outbound translation target cleared. */
	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);

	if (fsl_pcie_check_link(hose))
		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;

	return 0;
err1:
	iounmap(pcie->cfg_type0);
err0:
	kfree(pcie);
	return ret;

}
802*4882a593Smuzhiyun
mpc83xx_add_bridge(struct device_node * dev)803*4882a593Smuzhiyun int __init mpc83xx_add_bridge(struct device_node *dev)
804*4882a593Smuzhiyun {
805*4882a593Smuzhiyun int ret;
806*4882a593Smuzhiyun int len;
807*4882a593Smuzhiyun struct pci_controller *hose;
808*4882a593Smuzhiyun struct resource rsrc_reg;
809*4882a593Smuzhiyun struct resource rsrc_cfg;
810*4882a593Smuzhiyun const int *bus_range;
811*4882a593Smuzhiyun int primary;
812*4882a593Smuzhiyun
813*4882a593Smuzhiyun is_mpc83xx_pci = 1;
814*4882a593Smuzhiyun
815*4882a593Smuzhiyun if (!of_device_is_available(dev)) {
816*4882a593Smuzhiyun pr_warn("%pOF: disabled by the firmware.\n",
817*4882a593Smuzhiyun dev);
818*4882a593Smuzhiyun return -ENODEV;
819*4882a593Smuzhiyun }
820*4882a593Smuzhiyun pr_debug("Adding PCI host bridge %pOF\n", dev);
821*4882a593Smuzhiyun
822*4882a593Smuzhiyun /* Fetch host bridge registers address */
823*4882a593Smuzhiyun if (of_address_to_resource(dev, 0, &rsrc_reg)) {
824*4882a593Smuzhiyun printk(KERN_WARNING "Can't get pci register base!\n");
825*4882a593Smuzhiyun return -ENOMEM;
826*4882a593Smuzhiyun }
827*4882a593Smuzhiyun
828*4882a593Smuzhiyun memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
829*4882a593Smuzhiyun
830*4882a593Smuzhiyun if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
831*4882a593Smuzhiyun printk(KERN_WARNING
832*4882a593Smuzhiyun "No pci config register base in dev tree, "
833*4882a593Smuzhiyun "using default\n");
834*4882a593Smuzhiyun /*
835*4882a593Smuzhiyun * MPC83xx supports up to two host controllers
836*4882a593Smuzhiyun * one at 0x8500 has config space registers at 0x8300
837*4882a593Smuzhiyun * one at 0x8600 has config space registers at 0x8380
838*4882a593Smuzhiyun */
839*4882a593Smuzhiyun if ((rsrc_reg.start & 0xfffff) == 0x8500)
840*4882a593Smuzhiyun rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
841*4882a593Smuzhiyun else if ((rsrc_reg.start & 0xfffff) == 0x8600)
842*4882a593Smuzhiyun rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
843*4882a593Smuzhiyun }
844*4882a593Smuzhiyun /*
845*4882a593Smuzhiyun * Controller at offset 0x8500 is primary
846*4882a593Smuzhiyun */
847*4882a593Smuzhiyun if ((rsrc_reg.start & 0xfffff) == 0x8500)
848*4882a593Smuzhiyun primary = 1;
849*4882a593Smuzhiyun else
850*4882a593Smuzhiyun primary = 0;
851*4882a593Smuzhiyun
852*4882a593Smuzhiyun /* Get bus range if any */
853*4882a593Smuzhiyun bus_range = of_get_property(dev, "bus-range", &len);
854*4882a593Smuzhiyun if (bus_range == NULL || len < 2 * sizeof(int)) {
855*4882a593Smuzhiyun printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
856*4882a593Smuzhiyun " bus 0\n", dev);
857*4882a593Smuzhiyun }
858*4882a593Smuzhiyun
859*4882a593Smuzhiyun pci_add_flags(PCI_REASSIGN_ALL_BUS);
860*4882a593Smuzhiyun hose = pcibios_alloc_controller(dev);
861*4882a593Smuzhiyun if (!hose)
862*4882a593Smuzhiyun return -ENOMEM;
863*4882a593Smuzhiyun
864*4882a593Smuzhiyun hose->first_busno = bus_range ? bus_range[0] : 0;
865*4882a593Smuzhiyun hose->last_busno = bus_range ? bus_range[1] : 0xff;
866*4882a593Smuzhiyun
867*4882a593Smuzhiyun if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
868*4882a593Smuzhiyun ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
869*4882a593Smuzhiyun if (ret)
870*4882a593Smuzhiyun goto err0;
871*4882a593Smuzhiyun } else {
872*4882a593Smuzhiyun setup_indirect_pci(hose, rsrc_cfg.start,
873*4882a593Smuzhiyun rsrc_cfg.start + 4, 0);
874*4882a593Smuzhiyun }
875*4882a593Smuzhiyun
876*4882a593Smuzhiyun printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
877*4882a593Smuzhiyun "Firmware bus number: %d->%d\n",
878*4882a593Smuzhiyun (unsigned long long)rsrc_reg.start, hose->first_busno,
879*4882a593Smuzhiyun hose->last_busno);
880*4882a593Smuzhiyun
881*4882a593Smuzhiyun pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
882*4882a593Smuzhiyun hose, hose->cfg_addr, hose->cfg_data);
883*4882a593Smuzhiyun
884*4882a593Smuzhiyun /* Interpret the "ranges" property */
885*4882a593Smuzhiyun /* This also maps the I/O region and sets isa_io/mem_base */
886*4882a593Smuzhiyun pci_process_bridge_OF_ranges(hose, dev, primary);
887*4882a593Smuzhiyun
888*4882a593Smuzhiyun return 0;
889*4882a593Smuzhiyun err0:
890*4882a593Smuzhiyun pcibios_free_controller(hose);
891*4882a593Smuzhiyun return ret;
892*4882a593Smuzhiyun }
893*4882a593Smuzhiyun #endif /* CONFIG_PPC_83xx */
894*4882a593Smuzhiyun
/*
 * fsl_pci_immrbar_base - find the PCI address that maps back to IMMR
 * @hose: host controller to query
 *
 * On MPC83xx, walks the Root Complex inbound windows looking for one
 * whose translation address equals the local IMMR base and returns its
 * 64-bit PCI BAR.  On BookE/86xx parts, reads PEXCSRBAR (BAR0 of the
 * root port at devfn 0) instead.  Returns 0 when no mapping is found.
 */
u64 fsl_pci_immrbar_base(struct pci_controller *hose)
{
#ifdef CONFIG_PPC_83xx
	if (is_mpc83xx_pci) {
		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
		struct pex_inbound_window *in;
		int i;

		/* Walk the Root Complex Inbound windows to match IMMR base */
		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
		for (i = 0; i < 4; i++) {
			/* not enabled, skip */
			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
				continue;

			if (get_immrbase() == in_le32(&in[i].tar))
				return (u64)in_le32(&in[i].barh) << 32 |
					    in_le32(&in[i].barl);
		}

		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
	}
#endif

#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
	if (!is_mpc83xx_pci) {
		u32 base;

		pci_bus_read_config_dword(hose->bus,
			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);

		/*
		 * For PEXCSRBAR, bit 3-0 indicate prefetchable and
		 * address type. So when getting base address, these
		 * bits should be masked
		 */
		base &= PCI_BASE_ADDRESS_MEM_MASK;

		return base;
	}
#endif

	return 0;
}
939*4882a593Smuzhiyun
940*4882a593Smuzhiyun #ifdef CONFIG_E500
/*
 * mcheck_handle_load - emulate a faulted load after a PCI machine check
 * @regs: register state at the machine check
 * @inst: the instruction word that triggered the fault
 *
 * A read from PCI memory space that gets no response raises a machine
 * check on e500.  To mimic PCI master-abort semantics, fill the load's
 * destination register with all-ones of the access width (sign-extended
 * for the algebraic forms) and apply the update-form side effect to the
 * base register, so the faulting instruction can simply be skipped.
 *
 * Returns 1 if @inst was a recognized load and was emulated, 0 otherwise.
 */
static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
{
	unsigned int rd, ra, rb, d;

	rd = get_rt(inst);
	ra = get_ra(inst);
	rb = get_rb(inst);
	d = get_d(inst);

	switch (get_op(inst)) {
	case 31:
		/* X-form (indexed) loads */
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
		case OP_31_XOP_LWBRX:
			regs->gpr[rd] = 0xffffffff;
			break;

		case OP_31_XOP_LWZUX:
			regs->gpr[rd] = 0xffffffff;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		case OP_31_XOP_LBZX:
			regs->gpr[rd] = 0xff;
			break;

		case OP_31_XOP_LBZUX:
			regs->gpr[rd] = 0xff;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		case OP_31_XOP_LHZX:
		case OP_31_XOP_LHBRX:
			regs->gpr[rd] = 0xffff;
			break;

		case OP_31_XOP_LHZUX:
			regs->gpr[rd] = 0xffff;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		case OP_31_XOP_LHAX:
			/* algebraic: sign-extended all-ones */
			regs->gpr[rd] = ~0UL;
			break;

		case OP_31_XOP_LHAUX:
			regs->gpr[rd] = ~0UL;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		default:
			return 0;
		}
		break;

	/* D-form (immediate displacement) loads */
	case OP_LWZ:
		regs->gpr[rd] = 0xffffffff;
		break;

	case OP_LWZU:
		regs->gpr[rd] = 0xffffffff;
		regs->gpr[ra] += (s16)d;
		break;

	case OP_LBZ:
		regs->gpr[rd] = 0xff;
		break;

	case OP_LBZU:
		regs->gpr[rd] = 0xff;
		regs->gpr[ra] += (s16)d;
		break;

	case OP_LHZ:
		regs->gpr[rd] = 0xffff;
		break;

	case OP_LHZU:
		regs->gpr[rd] = 0xffff;
		regs->gpr[ra] += (s16)d;
		break;

	case OP_LHA:
		regs->gpr[rd] = ~0UL;
		break;

	case OP_LHAU:
		regs->gpr[rd] = ~0UL;
		regs->gpr[ra] += (s16)d;
		break;

	default:
		return 0;
	}

	return 1;
}
1038*4882a593Smuzhiyun
/*
 * Check whether @addr falls inside a PCI memory window of any FSL host
 * controller that uses the extended register set.  Returns 1 if so.
 */
static int is_in_pci_mem_space(phys_addr_t addr)
{
	struct pci_controller *hose;
	int win;

	list_for_each_entry(hose, &hose_list, list_node) {
		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
			continue;

		for (win = 0; win < 3; win++) {
			struct resource *r = &hose->mem_resources[win];

			if (!(r->flags & IORESOURCE_MEM))
				continue;
			if (addr >= r->start && addr <= r->end)
				return 1;
		}
	}

	return 0;
}
1058*4882a593Smuzhiyun
/*
 * fsl_pci_mcheck_exception - try to recover from a PCI-triggered machine check
 * @regs: register state at the exception
 *
 * If the faulting physical address (from MCAR/MCARU) lies inside a PCI
 * memory window, fetch the instruction at NIP and, when it is a load,
 * emulate the master-abort result (all-ones) and step past it.
 *
 * Returns 1 if the exception was handled, 0 to let generic machine-check
 * handling proceed.
 */
int fsl_pci_mcheck_exception(struct pt_regs *regs)
{
	u32 inst;
	int ret;
	phys_addr_t addr = 0;

	/* Let KVM/QEMU deal with the exception */
	if (regs->msr & MSR_GS)
		return 0;

#ifdef CONFIG_PHYS_64BIT
	/* High half of the 36-bit+ physical fault address. */
	addr = mfspr(SPRN_MCARU);
	addr <<= 32;
#endif
	addr += mfspr(SPRN_MCAR);

	if (is_in_pci_mem_space(addr)) {
		/* NIP may be a user or kernel address; fetch accordingly. */
		if (user_mode(regs))
			ret = copy_from_user_nofault(&inst,
				    (void __user *)regs->nip, sizeof(inst));
		else
			ret = get_kernel_nofault(inst, (void *)regs->nip);

		if (!ret && mcheck_handle_load(regs, inst)) {
			regs->nip += 4;	/* skip the emulated load */
			return 1;
		}
	}

	return 0;
}
1090*4882a593Smuzhiyun #endif
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
1093*4882a593Smuzhiyun static const struct of_device_id pci_ids[] = {
1094*4882a593Smuzhiyun { .compatible = "fsl,mpc8540-pci", },
1095*4882a593Smuzhiyun { .compatible = "fsl,mpc8548-pcie", },
1096*4882a593Smuzhiyun { .compatible = "fsl,mpc8610-pci", },
1097*4882a593Smuzhiyun { .compatible = "fsl,mpc8641-pcie", },
1098*4882a593Smuzhiyun { .compatible = "fsl,qoriq-pcie", },
1099*4882a593Smuzhiyun { .compatible = "fsl,qoriq-pcie-v2.1", },
1100*4882a593Smuzhiyun { .compatible = "fsl,qoriq-pcie-v2.2", },
1101*4882a593Smuzhiyun { .compatible = "fsl,qoriq-pcie-v2.3", },
1102*4882a593Smuzhiyun { .compatible = "fsl,qoriq-pcie-v2.4", },
1103*4882a593Smuzhiyun { .compatible = "fsl,qoriq-pcie-v3.0", },
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun /*
1106*4882a593Smuzhiyun * The following entries are for compatibility with older device
1107*4882a593Smuzhiyun * trees.
1108*4882a593Smuzhiyun */
1109*4882a593Smuzhiyun { .compatible = "fsl,p1022-pcie", },
1110*4882a593Smuzhiyun { .compatible = "fsl,p4080-pcie", },
1111*4882a593Smuzhiyun
1112*4882a593Smuzhiyun {},
1113*4882a593Smuzhiyun };
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun struct device_node *fsl_pci_primary;
1116*4882a593Smuzhiyun
/*
 * fsl_pci_assign_primary - choose which host bridge is "primary"
 *
 * Leaves fsl_pci_primary untouched if the platform already set it.
 * Otherwise prefers a PCI host bridge that parents an ISA node; failing
 * that, arbitrarily picks the first available bridge matching pci_ids.
 */
void fsl_pci_assign_primary(void)
{
	struct device_node *np;

	/* Callers can specify the primary bus using other means. */
	if (fsl_pci_primary)
		return;

	/* If a PCI host bridge contains an ISA node, it's primary. */
	np = of_find_node_by_type(NULL, "isa");
	/* Walk up the tree from the ISA node, dropping each child's ref. */
	while ((fsl_pci_primary = of_get_parent(np))) {
		of_node_put(np);
		np = fsl_pci_primary;

		if (of_match_node(pci_ids, np) && of_device_is_available(np))
			return;
	}

	/*
	 * If there's no PCI host bridge with ISA, arbitrarily
	 * designate one as primary.  This can go away once
	 * various bugs with primary-less systems are fixed.
	 */
	for_each_matching_node(np, pci_ids) {
		if (of_device_is_available(np)) {
			fsl_pci_primary = np;
			of_node_put(np);
			return;
		}
	}
}
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
fsl_pci_pme_handle(int irq,void * dev_id)1150*4882a593Smuzhiyun static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
1151*4882a593Smuzhiyun {
1152*4882a593Smuzhiyun struct pci_controller *hose = dev_id;
1153*4882a593Smuzhiyun struct ccsr_pci __iomem *pci = hose->private_data;
1154*4882a593Smuzhiyun u32 dr;
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun dr = in_be32(&pci->pex_pme_mes_dr);
1157*4882a593Smuzhiyun if (!dr)
1158*4882a593Smuzhiyun return IRQ_NONE;
1159*4882a593Smuzhiyun
1160*4882a593Smuzhiyun out_be32(&pci->pex_pme_mes_dr, dr);
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun return IRQ_HANDLED;
1163*4882a593Smuzhiyun }
1164*4882a593Smuzhiyun
/*
 * fsl_pci_pme_probe - wire up PME (power management event) handling
 * @hose: host controller to enable PME on
 *
 * Disables PME on the root port, installs the PME interrupt handler,
 * unmasks the turn-off / L2-L3 entry-exit events in the controller,
 * then re-enables PME.  Returns 0 on success, -ENXIO/-ENODEV on IRQ
 * setup failure.
 */
static int fsl_pci_pme_probe(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci;
	struct pci_dev *dev;
	int pme_irq;
	int res;
	u16 pms;

	/* Get hose's pci_dev */
	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);

	/* PME Disable */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
	pms &= ~PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);

	pme_irq = irq_of_parse_and_map(hose->dn, 0);
	if (!pme_irq) {
		dev_err(&dev->dev, "Failed to map PME interrupt.\n");

		return -ENXIO;
	}

	res = devm_request_irq(hose->parent, pme_irq,
			       fsl_pci_pme_handle,
			       IRQF_SHARED,
			       "[PCI] PME", hose);
	if (res < 0) {
		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
		irq_dispose_mapping(pme_irq);

		return -ENODEV;
	}

	pci = hose->private_data;

	/* Enable PTOD, ENL23D & EXL23D */
	clrbits32(&pci->pex_pme_mes_disr,
		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);

	/* Reset the interrupt-enable mask to just these three events. */
	out_be32(&pci->pex_pme_mes_ier, 0);
	setbits32(&pci->pex_pme_mes_ier,
		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);

	/* PME Enable */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
	pms |= PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);

	return 0;
}
1216*4882a593Smuzhiyun
/*
 * Broadcast a PME_Turn_Off message and poll (up to ~150 ms) for the
 * completion event, acknowledging it when it arrives.
 */
static void send_pme_turnoff_message(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	u32 dr;
	int i;

	/* Send PME_Turn_Off Message Request */
	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);

	/* Wait for turn off to complete */
	for (i = 0; i < 150; i++) {
		dr = in_be32(&pci->pex_pme_mes_dr);
		if (dr) {
			/* acknowledge the event */
			out_be32(&pci->pex_pme_mes_dr, dr);
			break;
		}

		udelay(1000);
	}
}
1237*4882a593Smuzhiyun
/* Per-hose suspend hook: put the link into the power-down handshake. */
static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
{
	send_pme_turnoff_message(hose);
}
1242*4882a593Smuzhiyun
fsl_pci_syscore_suspend(void)1243*4882a593Smuzhiyun static int fsl_pci_syscore_suspend(void)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun struct pci_controller *hose, *tmp;
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1248*4882a593Smuzhiyun fsl_pci_syscore_do_suspend(hose);
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun return 0;
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun
/*
 * Per-hose resume hook: request exit from the L2 link state, poll
 * (up to ~150 ms) for the completion event, then reprogram the ATMU
 * windows that were lost across the sleep.
 */
static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	u32 dr;
	int i;

	/* Send Exit L2 State Message */
	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);

	/* Wait exit done */
	for (i = 0; i < 150; i++) {
		dr = in_be32(&pci->pex_pme_mes_dr);
		if (dr) {
			/* acknowledge the event */
			out_be32(&pci->pex_pme_mes_dr, dr);
			break;
		}

		udelay(1000);
	}

	/* Restore the inbound/outbound translation windows. */
	setup_pci_atmu(hose);
}
1275*4882a593Smuzhiyun
fsl_pci_syscore_resume(void)1276*4882a593Smuzhiyun static void fsl_pci_syscore_resume(void)
1277*4882a593Smuzhiyun {
1278*4882a593Smuzhiyun struct pci_controller *hose, *tmp;
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1281*4882a593Smuzhiyun fsl_pci_syscore_do_resume(hose);
1282*4882a593Smuzhiyun }
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun static struct syscore_ops pci_syscore_pm_ops = {
1285*4882a593Smuzhiyun .suspend = fsl_pci_syscore_suspend,
1286*4882a593Smuzhiyun .resume = fsl_pci_syscore_resume,
1287*4882a593Smuzhiyun };
1288*4882a593Smuzhiyun #endif
1289*4882a593Smuzhiyun
/* PHB fixup hook: enable PME handling once the bus has been scanned. */
void fsl_pcibios_fixup_phb(struct pci_controller *phb)
{
#ifdef CONFIG_PM_SLEEP
	fsl_pci_pme_probe(phb);
#endif
}
1296*4882a593Smuzhiyun
add_err_dev(struct platform_device * pdev)1297*4882a593Smuzhiyun static int add_err_dev(struct platform_device *pdev)
1298*4882a593Smuzhiyun {
1299*4882a593Smuzhiyun struct platform_device *errdev;
1300*4882a593Smuzhiyun struct mpc85xx_edac_pci_plat_data pd = {
1301*4882a593Smuzhiyun .of_node = pdev->dev.of_node
1302*4882a593Smuzhiyun };
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun errdev = platform_device_register_resndata(&pdev->dev,
1305*4882a593Smuzhiyun "mpc85xx-pci-edac",
1306*4882a593Smuzhiyun PLATFORM_DEVID_AUTO,
1307*4882a593Smuzhiyun pdev->resource,
1308*4882a593Smuzhiyun pdev->num_resources,
1309*4882a593Smuzhiyun &pd, sizeof(pd));
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun return PTR_ERR_OR_ZERO(errdev);
1312*4882a593Smuzhiyun }
1313*4882a593Smuzhiyun
fsl_pci_probe(struct platform_device * pdev)1314*4882a593Smuzhiyun static int fsl_pci_probe(struct platform_device *pdev)
1315*4882a593Smuzhiyun {
1316*4882a593Smuzhiyun struct device_node *node;
1317*4882a593Smuzhiyun int ret;
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun node = pdev->dev.of_node;
1320*4882a593Smuzhiyun ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
1321*4882a593Smuzhiyun if (ret)
1322*4882a593Smuzhiyun return ret;
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun ret = add_err_dev(pdev);
1325*4882a593Smuzhiyun if (ret)
1326*4882a593Smuzhiyun dev_err(&pdev->dev, "couldn't register error device: %d\n",
1327*4882a593Smuzhiyun ret);
1328*4882a593Smuzhiyun
1329*4882a593Smuzhiyun return 0;
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun
1332*4882a593Smuzhiyun static struct platform_driver fsl_pci_driver = {
1333*4882a593Smuzhiyun .driver = {
1334*4882a593Smuzhiyun .name = "fsl-pci",
1335*4882a593Smuzhiyun .of_match_table = pci_ids,
1336*4882a593Smuzhiyun },
1337*4882a593Smuzhiyun .probe = fsl_pci_probe,
1338*4882a593Smuzhiyun };
1339*4882a593Smuzhiyun
/* Register the suspend/resume syscore ops and the fsl-pci driver. */
static int __init fsl_pci_init(void)
{
#ifdef CONFIG_PM_SLEEP
	register_syscore_ops(&pci_syscore_pm_ops);
#endif
	return platform_driver_register(&fsl_pci_driver);
}
arch_initcall(fsl_pci_init);
1348*4882a593Smuzhiyun #endif
1349