// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/iommu.h>
#include <asm/ppc-pci.h>
#include <asm/isa-bridge.h>

#include "maple.h"

#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif

static struct pci_controller *u3_agp, *u3_ht, *u4_pcie;

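/*
 * Walk one level of the device tree and return the highest "bus-range"
 * end value found on any PCI or CardBus bridge at this level or below.
 * The walk recurses into each bridge's children, so the caller ends up
 * with the largest OF bus number in use anywhere under it.
 */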
static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
{
	for (; node != NULL; node = node->sibling) {
		const int *bus_range;
		const unsigned int *class_code;
		int len;

		/* For PCI<->PCI bridges or CardBus bridges, we go down */
		class_code = of_get_property(node, "class-code", NULL);
		if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
				    (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
			continue;
		bus_range = of_get_property(node, "bus-range", &len);
		if (bus_range != NULL && len > 2 * sizeof(int)) {
			if (bus_range[1] > higher)
				higher = bus_range[1];
		}
		higher = fixup_one_level_bus_range(node->child, higher);
	}
	return higher;
}

/* This routine fixes the "bus-range" property of all bridges in the
 * system since they tend to have their "last" member wrong on macs
 *
 * Note that the bus numbers manipulated here are OF bus numbers, they
 * are not Linux bus numbers.
 */
static void __init fixup_bus_range(struct device_node *bridge)
{
	int *bus_range;
	struct property *prop;
	int len;

	/* Lookup the "bus-range" property for the hose */
	prop = of_find_property(bridge, "bus-range", &len);
	if (prop == NULL || prop->value == NULL || len < 2 * sizeof(int)) {
		printk(KERN_WARNING "Can't get bus-range for %pOF\n",
		       bridge);
		return;
	}
	bus_range = prop->value;
	bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
}

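/*
 * U3 AGP configuration access. These helpers build the value written
 * into the bridge's config address register: cfa0 is used for devices
 * on the root bus (one address bit per slot, e.g. slot 11 function 0
 * offset 0x10 encodes as 0x810), while cfa1 carries an explicit
 * bus/devfn and sets bit 0 to request a config cycle for devices
 * behind bridges. This mirrors the scheme used by the Apple Uninorth
 * bridges, as the comment in u3_agp_cfg_access() below suggests.
 */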
static unsigned long u3_agp_cfa0(u8 devfn, u8 off)
{
	return (1 << (unsigned long)PCI_SLOT(devfn)) |
		((unsigned long)PCI_FUNC(devfn) << 8) |
		((unsigned long)off & 0xFCUL);
}

static unsigned long u3_agp_cfa1(u8 bus, u8 devfn, u8 off)
{
	return ((unsigned long)bus << 16) |
		((unsigned long)devfn << 8) |
		((unsigned long)off & 0xFCUL) |
		1UL;
}

static volatile void __iomem *u3_agp_cfg_access(struct pci_controller *hose,
						u8 bus, u8 dev_fn, u8 offset)
{
	unsigned int caddr;

	if (bus == hose->first_busno) {
		if (dev_fn < (11 << 3))
			return NULL;
		caddr = u3_agp_cfa0(dev_fn, offset);
	} else
		caddr = u3_agp_cfa1(bus, dev_fn, offset);

	/* Uninorth will return garbage if we don't read back the value ! */
	do {
		out_le32(hose->cfg_addr, caddr);
	} while (in_le32(hose->cfg_addr) != caddr);

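	/*
	 * Only the low 3 bits of the offset matter from here on: they
	 * select the byte lane within what appears to be an 8-byte
	 * config data window at hose->cfg_data.
	 */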
	offset &= 0x07;
	return hose->cfg_data + offset;
}

static int u3_agp_read_config(struct pci_bus *bus, unsigned int devfn,
			      int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	volatile void __iomem *addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = u3_agp_cfg_access(hose, bus->number, devfn, offset);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		*val = in_8(addr);
		break;
	case 2:
		*val = in_le16(addr);
		break;
	default:
		*val = in_le32(addr);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static int u3_agp_write_config(struct pci_bus *bus, unsigned int devfn,
			       int offset, int len, u32 val)
{
	struct pci_controller *hose;
	volatile void __iomem *addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = u3_agp_cfg_access(hose, bus->number, devfn, offset);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		out_8(addr, val);
		break;
	case 2:
		out_le16(addr, val);
		break;
	default:
		out_le32(addr, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops u3_agp_pci_ops =
{
	.read = u3_agp_read_config,
	.write = u3_agp_write_config,
};

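/*
 * U3 HyperTransport configuration access. Unlike the AGP and PCIe
 * hosts there is no address/data register pair: the whole config space
 * is memory-mapped through hose->cfg_data. cfa0 covers devices on the
 * root bus; cfa1 adds the bus number and sets bit 24, which presumably
 * selects accesses to devices behind bridges (the HT analogue of a
 * type 1 config cycle).
 */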
static unsigned long u3_ht_cfa0(u8 devfn, u8 off)
{
	return (devfn << 8) | off;
}

static unsigned long u3_ht_cfa1(u8 bus, u8 devfn, u8 off)
{
	return u3_ht_cfa0(devfn, off) + (bus << 16) + 0x01000000UL;
}

static volatile void __iomem *u3_ht_cfg_access(struct pci_controller *hose,
					       u8 bus, u8 devfn, u8 offset)
{
	if (bus == hose->first_busno) {
		if (PCI_SLOT(devfn) == 0)
			return NULL;
		return hose->cfg_data + u3_ht_cfa0(devfn, offset);
	} else
		return hose->cfg_data + u3_ht_cfa1(bus, devfn, offset);
}

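/*
 * Accesses to the HT host bridge itself (devfn 0 on the root bus) go
 * through its own register block at hose->cfg_addr instead of a config
 * cycle. The address arithmetic below spreads each config dword over a
 * 16-byte stride ((offset & ~3) << 2), and the (4 - len - (offset & 3))
 * term swaps the byte lanes, since these registers are accessed
 * big-endian (in_8/in_be16/in_be32) while PCI config offsets are
 * little-endian byte addresses.
 */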
static int u3_ht_root_read_config(struct pci_controller *hose, u8 offset,
				  int len, u32 *val)
{
	volatile void __iomem *addr;

	addr = hose->cfg_addr;
	addr += ((offset & ~3) << 2) + (4 - len - (offset & 3));

	switch (len) {
	case 1:
		*val = in_8(addr);
		break;
	case 2:
		*val = in_be16(addr);
		break;
	default:
		*val = in_be32(addr);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int u3_ht_root_write_config(struct pci_controller *hose, u8 offset,
				   int len, u32 val)
{
	volatile void __iomem *addr;

	addr = hose->cfg_addr + ((offset & ~3) << 2) + (4 - len - (offset & 3));

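	/*
	 * Writes to the range covering the root bridge's BARs (up to the
	 * capability pointer) are silently discarded, presumably so the
	 * generic PCI code cannot relocate the host bridge's own windows.
	 */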
	if (offset >= PCI_BASE_ADDRESS_0 && offset < PCI_CAPABILITY_LIST)
		return PCIBIOS_SUCCESSFUL;

	switch (len) {
	case 1:
		out_8(addr, val);
		break;
	case 2:
		out_be16(addr, val);
		break;
	default:
		out_be32(addr, val);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
			     int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	volatile void __iomem *addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0))
		return u3_ht_root_read_config(hose, offset, len, val);

	if (offset > 0xff)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		*val = in_8(addr);
		break;
	case 2:
		*val = in_le16(addr);
		break;
	default:
		*val = in_le32(addr);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
			      int offset, int len, u32 val)
{
	struct pci_controller *hose;
	volatile void __iomem *addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0))
		return u3_ht_root_write_config(hose, offset, len, val);

	if (offset > 0xff)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		out_8(addr, val);
		break;
	case 2:
		out_le16(addr, val);
		break;
	default:
		out_le32(addr, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops u3_ht_pci_ops =
{
	.read = u3_ht_read_config,
	.write = u3_ht_write_config,
};

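/*
 * U4 PCIe configuration access. The encoding matches the U3 AGP one
 * (one address bit per slot on the root bus, bus/devfn plus bit 0 for
 * devices behind bridges), with one addition: bits 8-11 of the register
 * offset go into bits 28-31 of the address, which is how the extended
 * (up to 4KB) PCIe config space is reached through the same
 * address/data register pair.
 */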
static unsigned int u4_pcie_cfa0(unsigned int devfn, unsigned int off)
{
	return (1 << PCI_SLOT(devfn)) |
		(PCI_FUNC(devfn) << 8) |
		((off >> 8) << 28) |
		(off & 0xfcu);
}

static unsigned int u4_pcie_cfa1(unsigned int bus, unsigned int devfn,
				 unsigned int off)
{
	return (bus << 16) |
		(devfn << 8) |
		((off >> 8) << 28) |
		(off & 0xfcu) | 1u;
}

static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller *hose,
						 u8 bus, u8 dev_fn, int offset)
{
	unsigned int caddr;

	if (bus == hose->first_busno)
		caddr = u4_pcie_cfa0(dev_fn, offset);
	else
		caddr = u4_pcie_cfa1(bus, dev_fn, offset);

	/* Uninorth will return garbage if we don't read back the value ! */
	do {
		out_le32(hose->cfg_addr, caddr);
	} while (in_le32(hose->cfg_addr) != caddr);

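	/*
	 * Here only the low 2 bits of the offset survive, so the PCIe
	 * config data window appears to be 4 bytes wide rather than the
	 * 8 bytes used by the AGP host.
	 */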
	offset &= 0x03;
	return hose->cfg_data + offset;
}

static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
			       int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	volatile void __iomem *addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		*val = in_8(addr);
		break;
	case 2:
		*val = in_le16(addr);
		break;
	default:
		*val = in_le32(addr);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 val)
{
	struct pci_controller *hose;
	volatile void __iomem *addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		out_8(addr, val);
		break;
	case 2:
		out_le16(addr, val);
		break;
	default:
		out_le32(addr, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops u4_pcie_pci_ops =
{
	.read = u4_pcie_read_config,
	.write = u4_pcie_write_config,
};

static void __init setup_u3_agp(struct pci_controller *hose)
{
	/* On G5, we move AGP up to a high bus number so we don't need
	 * to reassign bus numbers for HT. If we ever have P2P bridges
	 * on AGP, we'll have to move pci_assign_all_buses to the
	 * pci_controller structure so we can enable it for AGP but not
	 * for HT children.
	 * We hard-code the addresses because of the different size of
	 * the reg address cell; this should be fixed by killing struct
	 * reg_property and using some accessor functions instead.
	 */
	hose->first_busno = 0xf0;
	hose->last_busno = 0xff;
	hose->ops = &u3_agp_pci_ops;
	hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
	hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);

	u3_agp = hose;
}

static void __init setup_u4_pcie(struct pci_controller *hose)
{
	/* We currently only implement the "non-atomic" config space, to
	 * be optimised later.
	 */
	hose->ops = &u4_pcie_pci_ops;
	hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
	hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);

	u4_pcie = hose;
}

static void __init setup_u3_ht(struct pci_controller *hose)
{
	hose->ops = &u3_ht_pci_ops;

	/* We hard-code the addresses because of the different size of
	 * the reg address cell; this should be fixed by killing struct
	 * reg_property and using some accessor functions instead.
	 */
	hose->cfg_data = ioremap(0xf2000000, 0x02000000);
	hose->cfg_addr = ioremap(0xf8070000, 0x1000);

	hose->first_busno = 0;
	hose->last_busno = 0xef;

	u3_ht = hose;
}

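/*
 * Set up one host bridge from its device-tree node: read (or default)
 * the firmware bus range, allocate the pci_controller, hook up the
 * matching config ops, parse "ranges", fix up the OF "bus-range"
 * property and look for a legacy ISA bridge below it.
 */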
static int __init maple_add_bridge(struct device_node *dev)
{
	int len;
	struct pci_controller *hose;
	char *disp_name;
	const int *bus_range;
	int primary = 1;

	DBG("Adding PCI host bridge %pOF\n", dev);

	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int)) {
		printk(KERN_WARNING "Can't get bus-range for %pOF, assume bus 0\n",
		       dev);
	}

	hose = pcibios_alloc_controller(dev);
	if (hose == NULL)
		return -ENOMEM;
	hose->first_busno = bus_range ? bus_range[0] : 0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;
	hose->controller_ops = maple_pci_controller_ops;

	disp_name = NULL;
	if (of_device_is_compatible(dev, "u3-agp")) {
		setup_u3_agp(hose);
		disp_name = "U3-AGP";
		primary = 0;
	} else if (of_device_is_compatible(dev, "u3-ht")) {
		setup_u3_ht(hose);
		disp_name = "U3-HT";
		primary = 1;
	} else if (of_device_is_compatible(dev, "u4-pcie")) {
		setup_u4_pcie(hose);
		disp_name = "U4-PCIE";
		primary = 0;
	}
	printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
	       disp_name, hose->first_busno, hose->last_busno);

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(hose, dev, primary);

	/* Fixup "bus-range" OF property */
	fixup_bus_range(dev);

	/* Check for legacy IOs */
	isa_bridge_find_early(hose);

	return 0;
}

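/*
 * Per-device IRQ fixup: maps hardware interrupt 1 for devices on the
 * U4 PCIe host's root bus, and clears the AMD8111 IDE interrupt when
 * that controller is in legacy mode so its driver falls back to
 * pci_get_legacy_ide_irq() (and hence to
 * maple_pci_get_legacy_ide_irq() below).
 */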
void maple_pci_irq_fixup(struct pci_dev *dev)
{
	DBG(" -> maple_pci_irq_fixup\n");

	/* Fixup IRQ for PCIe host */
	if (u4_pcie != NULL && dev->bus->number == 0 &&
	    pci_bus_to_host(dev->bus) == u4_pcie) {
		printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
		dev->irq = irq_create_mapping(NULL, 1);
		if (dev->irq)
			irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
	}

	/* Hide AMD8111 IDE interrupt when in legacy mode so
	 * the driver calls pci_get_legacy_ide_irq()
	 */
	if (dev->vendor == PCI_VENDOR_ID_AMD &&
	    dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
	    (dev->class & 5) != 5) {
		dev->irq = 0;
	}

	DBG(" <- maple_pci_irq_fixup\n");
}

static int maple_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct pci_controller *hose = pci_bus_to_host(bridge->bus);
	struct device_node *np, *child;

	if (hose != u3_agp)
		return 0;

	/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
	 * assume there is no P2P bridge on the AGP bus, which should be a
	 * safe assumption.
	 */
	np = hose->dn;
	PCI_DN(np)->busno = 0xf0;
	for_each_child_of_node(np, child)
		PCI_DN(child)->busno = 0xf0;

	return 0;
}

void __init maple_pci_init(void)
{
	struct device_node *np, *root;
	struct device_node *ht = NULL;

	/* Probe the root PCI hosts, i.e. on U3 the AGP host and the
	 * HyperTransport host. The HT host is kept around and added
	 * last, as its resource management relies on the AGP resources
	 * having been set up first.
	 */
	root = of_find_node_by_path("/");
	if (root == NULL) {
		printk(KERN_CRIT "maple_find_bridges: can't find root of device tree\n");
		return;
	}
	for_each_child_of_node(root, np) {
		if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "ht"))
			continue;
		if ((of_device_is_compatible(np, "u4-pcie") ||
		     of_device_is_compatible(np, "u3-agp")) &&
		    maple_add_bridge(np) == 0)
			of_node_get(np);

		if (of_device_is_compatible(np, "u3-ht")) {
			of_node_get(np);
			ht = np;
		}
	}
	of_node_put(root);

	/* Now setup the HyperTransport host if we found any */
	if (ht && maple_add_bridge(ht) != 0)
		of_node_put(ht);

	ppc_md.pcibios_root_bridge_prepare = maple_pci_root_bridge_prepare;

	/* Tell pci.c to not change any resource allocations. */
	pci_add_flags(PCI_PROBE_ONLY);
}

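/*
 * Resolve the legacy IDE interrupt for the on-board AMD8111 IDE from
 * the device tree, falling back to the conventional ISA IRQs 14/15 if
 * the OF node or the interrupt mapping is missing.
 */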
int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
{
	struct device_node *np;
	unsigned int defirq = channel ? 15 : 14;
	unsigned int irq;

	if (pdev->vendor != PCI_VENDOR_ID_AMD ||
	    pdev->device != PCI_DEVICE_ID_AMD_8111_IDE)
		return defirq;

	np = pci_device_to_OF_node(pdev);
	if (np == NULL) {
		printk("Failed to locate OF node for IDE %s\n",
		       pci_name(pdev));
		return defirq;
	}
	irq = irq_of_parse_and_map(np, channel & 0x1);
	if (!irq) {
		printk("Failed to map onboard IDE interrupt for channel %d\n",
		       channel);
		return defirq;
	}
	return irq;
}

static void quirk_ipr_msi(struct pci_dev *dev)
{
	/* Something prevents MSIs from the IPR from working on Bimini,
	 * and the driver has no smarts to recover. So disable MSI
	 * on it for now. */

	if (machine_is(maple)) {
		dev->no_msi = 1;
		dev_info(&dev->dev, "Quirk disabled MSI\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
			quirk_ipr_msi);

struct pci_controller_ops maple_pci_controller_ops = {
};