xref: /OK3568_Linux_fs/kernel/arch/powerpc/kernel/pci-common.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contains common PCI routines for all PPC platforms
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/numa.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>

#include "../../../drivers/pci/pci.h"

/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* For dynamic PHB numbering on get_phb_number(): max number of PHBs. */
#define MAX_PHBS 0x10000

/*
 * For dynamic PHB numbering: used/free PHBs tracking bitmap.
 * Accesses to this bitmap should be protected by hose_spinlock.
 */
static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);

/* ISA Memory physical address */
resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);


static const struct dma_map_ops *pci_dma_ops;

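/*
 * DMA ops installed here are picked up in pcibios_bus_add_device()
 * below via set_dma_ops(); platform code is expected to call
 * set_pci_dma_ops() before PCI devices are added.
 */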
void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

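/*
 * Pick a PCI domain number for a new host bridge.  Fixed numbering
 * from the device tree is preferred so domains stay stable across
 * boots; otherwise the bitmap below hands out the lowest free number.
 */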
static int get_phb_number(struct device_node *dn)
{
	int ret, phb_id = -1;
	u64 prop;

	/*
	 * Try fixed PHB numbering first, by checking archs and reading
	 * the respective device-tree properties. Firstly, try reading
	 * standard "linux,pci-domain", then try reading "ibm,opal-phbid"
	 * (only present in powernv OPAL environment), then try the
	 * device-tree alias and, as a last resort, use the lower bits
	 * of the "reg" property.
	 */
	ret = of_get_pci_domain_nr(dn);
	if (ret >= 0) {
		prop = ret;
		ret = 0;
	}
	if (ret)
		ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);

	if (ret) {
		u32 prop_32;
		ret = of_alias_get_id(dn, "pci");
		if (ret >= 0) {
			prop = ret;
			ret = 0;
		}
	}
	if (ret) {
		u32 prop_32;
		ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
		prop = prop_32;
	}

	if (!ret)
		phb_id = (int)(prop & (MAX_PHBS - 1));

	spin_lock(&hose_spinlock);

	/* We need to be sure to not use the same PHB number twice. */
	if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
		goto out_unlock;

	/* If everything fails then fall back to dynamic PHB numbering. */
	phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
	BUG_ON(phb_id >= MAX_PHBS);
	set_bit(phb_id, phb_bitmap);

out_unlock:
	spin_unlock(&hose_spinlock);

	return phb_id;
}

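/*
 * Allocate a pci_controller.  This can run before the slab allocator
 * is up (hence zalloc_maybe_bootmem()); is_dynamic records whether
 * the structure may later be kfree()d by pcibios_free_controller().
 */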
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;

	phb->global_number = get_phb_number(dev);

	spin_lock(&hose_spinlock);
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);

	phb->dn = dev;
	phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = NUMA_NO_NODE;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
EXPORT_SYMBOL_GPL(pcibios_alloc_controller);

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);

	/* Clear bit of phb_bitmap to allow reuse of this PHB number. */
	if (phb->global_number < MAX_PHBS)
		clear_bit(phb->global_number, phb_bitmap);

	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller);

/*
 * This function is used to call pcibios_free_controller()
 * in a deferred manner: a callback from the PCI subsystem.
 *
 * _*DO NOT*_ call pcibios_free_controller() explicitly if
 * this is used (or it may access an invalid *phb pointer).
 *
 * The callback occurs when all references to the root bus
 * are dropped (e.g., child buses/devices and their users).
 *
 * It's called as .release_fn() of 'struct pci_host_bridge'
 * which is associated with the 'struct pci_controller.bus'
 * (root bus) - it expects .release_data to hold a pointer
 * to 'struct pci_controller'.
 *
 * In order to use it, register .release_fn()/release_data
 * like this:
 *
 * pci_set_host_bridge_release(bridge,
 *                             pcibios_free_controller_deferred,
 *                             (void *) phb);
 *
 * e.g. in the pcibios_root_bridge_prepare() callback from
 * pci_create_root_bus().
 */
void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
{
	struct pci_controller *phb = (struct pci_controller *)
					 bridge->release_data;

	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);

	pcibios_free_controller(phb);
}
EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);

/*
 * This function returns the minimal alignment for memory or I/O
 * windows of the associated P2P bridge.  By default: 4KiB alignment
 * for I/O windows and 1MiB for memory windows.
 */
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
					 unsigned long type)
{
	struct pci_controller *phb = pci_bus_to_host(bus);

	if (phb->controller_ops.window_alignment)
		return phb->controller_ops.window_alignment(bus, type);

	/*
	 * PCI core will figure out the default
	 * alignment: 4KiB for I/O and 1MiB for
	 * memory window.
	 */
	return 1;
}

void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (hose->controller_ops.setup_bridge)
		hose->controller_ops.setup_bridge(bus, type);
}

void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
	struct pci_controller *phb = pci_bus_to_host(dev->bus);

	if (phb->controller_ops.reset_secondary_bus) {
		phb->controller_ops.reset_secondary_bus(dev);
		return;
	}

	pci_reset_secondary_bus(dev);
}

resource_size_t pcibios_default_alignment(void)
{
	if (ppc_md.pcibios_default_alignment)
		return ppc_md.pcibios_default_alignment();

	return 0;
}

#ifdef CONFIG_PCI_IOV
resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
{
	if (ppc_md.pcibios_iov_resource_alignment)
		return ppc_md.pcibios_iov_resource_alignment(pdev, resno);

	return pci_iov_resource_size(pdev, resno);
}

int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	if (ppc_md.pcibios_sriov_enable)
		return ppc_md.pcibios_sriov_enable(pdev, num_vfs);

	return 0;
}

int pcibios_sriov_disable(struct pci_dev *pdev)
{
	if (ppc_md.pcibios_sriov_disable)
		return ppc_md.pcibios_sriov_disable(pdev);

	return 0;
}

#endif /* CONFIG_PCI_IOV */

static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}

int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

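/*
 * Convert a CPU physical address into a port number as used by
 * inb()/outb(): port numbers are offsets from _IO_BASE into the
 * virtual IO window of the owning PHB, which is what the
 * io_base_virt arithmetic below computes.
 */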
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

struct pci_controller *pci_find_controller_for_domain(int domain_nr)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->global_number == domain_nr)
			return hose;

	return NULL;
}

/*
 * Reads the interrupt pin to determine if the interrupt is used by the
 * card.  If the interrupt is used, then gets the interrupt line from
 * Open Firmware and sets it in the pci_dev and PCI config space.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

	/* Try to get a mapping from the device-tree */
	virq = of_irq_parse_and_map_pci(pci_dev, 0, 0);
	if (virq <= 0) {
		u8 line, pin;

		/* If that fails, let's fall back to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	}

	if (!virq) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s.
 *  -- paulus.
 */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	resource_size_t ioaddr = pci_resource_start(pdev, bar);

	if (!hose)
		return -EINVAL;

	/* Convert to an offset within this PCI controller */
	ioaddr -= (unsigned long)hose->io_base_virt - _IO_BASE;

	vma->vm_pgoff += (ioaddr + hose->io_base_phys) >> PAGE_SHIFT;
	return 0;
}

/*
 * This one is used by /dev/mem and fbdev which have no clue about the
 * PCI device; it tries to find the PCI device first and calls the
 * above routine.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}

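/*
 * For illustration (hypothetical caller): a 2-byte read of a bus's
 * sysfs "legacy_io" file at offset 0x60 would arrive here as
 * pci_legacy_read(bus, 0x60, &val, 2) and be satisfied by an
 * in_le16() from the PHB's virtual IO window.
 */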
/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}

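/*
 * mmap_state selects which window is exposed below: pci_mmap_mem maps
 * the ISA memory hole (with an anonymous-memory fallback for
 * out-of-range requests, see the "Hack alert"), while the IO case
 * re-checks the port range against the PHB's io_resource first.
 */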
/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error trying
		 * to mmap legacy_mem (instead of just moving on without legacy memory
		 * access) we fake it here by giving it anonymous memory, effectively
		 * behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

629*4882a593Smuzhiyun 
pci_resource_to_user(const struct pci_dev * dev,int bar,const struct resource * rsrc,resource_size_t * start,resource_size_t * end)630*4882a593Smuzhiyun void pci_resource_to_user(const struct pci_dev *dev, int bar,
631*4882a593Smuzhiyun 			  const struct resource *rsrc,
632*4882a593Smuzhiyun 			  resource_size_t *start, resource_size_t *end)
633*4882a593Smuzhiyun {
634*4882a593Smuzhiyun 	struct pci_bus_region region;
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun 	if (rsrc->flags & IORESOURCE_IO) {
637*4882a593Smuzhiyun 		pcibios_resource_to_bus(dev->bus, &region,
638*4882a593Smuzhiyun 					(struct resource *) rsrc);
639*4882a593Smuzhiyun 		*start = region.start;
640*4882a593Smuzhiyun 		*end = region.end;
641*4882a593Smuzhiyun 		return;
642*4882a593Smuzhiyun 	}
643*4882a593Smuzhiyun 
644*4882a593Smuzhiyun 	/* We pass a CPU physical address to userland for MMIO instead of a
645*4882a593Smuzhiyun 	 * BAR value because X is lame and expects to be able to use that
646*4882a593Smuzhiyun 	 * to pass to /dev/mem!
647*4882a593Smuzhiyun 	 *
648*4882a593Smuzhiyun 	 * That means we may have 64-bit values where some apps only expect
649*4882a593Smuzhiyun 	 * 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
650*4882a593Smuzhiyun 	 */
651*4882a593Smuzhiyun 	*start = rsrc->start;
652*4882a593Smuzhiyun 	*end = rsrc->end;
653*4882a593Smuzhiyun }
654*4882a593Smuzhiyun 
/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32 bits platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64 bits values for the parsing
 */
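/*
 * For illustration only (values made up): a 32-bit host bridge node
 * could carry
 *
 *	ranges = <0x01000000 0x0 0x00000000  0xe8000000  0x0 0x00010000
 *		  0x02000000 0x0 0x80000000  0x80000000  0x0 0x20000000>;
 *
 * i.e. a 64KB IO window at CPU address 0xe8000000 mapping PCI IO
 * port 0, and a 512MB 1:1 memory window at 0x80000000, which the
 * parser below would turn into hose->io_resource and
 * hose->mem_resources[0] respectively.
 */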
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
	       dev, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	/* Parse it */
	for_each_of_pci_range(&parser, &range) {
		/* If we failed translation or got a zero-sized region
		 * (some FW try to feed us with nonsensical zero-sized regions
		 * such as power3 which look like some kind of attempt at exposing
		 * the VGA memory hole)
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			printk(KERN_INFO
			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;
			break;
		case IORESOURCE_MEM:
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       range.cpu_addr, range.cpu_addr + range.size - 1,
			       range.pci_addr,
			       (range.flags & IORESOURCE_PREFETCH) ?
			       "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (range.pci_addr == 0) {
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* Build resource */
			hose->mem_offset[memno] = range.cpu_addr -
							range.pci_addr;
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->flags = range.flags;
			res->start = range.cpu_addr;
			res->end = range.cpu_addr + range.size - 1;
			res->parent = res->child = res->sibling = NULL;
		}
	}
}

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
		return 0;
	if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
		return hose->global_number != 0;
	return 1;
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	if (ppc_md.pcibios_root_bridge_prepare)
		return ppc_md.pcibios_root_bridge_prepare(bridge);

	return 0;
}

/* This header fixup will do the resource fixup for all devices as they are
 * probed, but not for bridge ranges
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}

	if (dev->is_virtfn)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		struct pci_bus_region reg;
		if (!res->flags)
			continue;

		/* If we're going to re-assign everything, we mark all resources
		 * as unset (and 0-base them). In addition, we mark BARs starting
		 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
		 * since in that case, we don't want to re-assign anything
		 */
		pcibios_resource_to_bus(dev->bus, &reg, res);
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
			/* Only print message if not re-assigning */
			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
					 pci_name(dev), i, res);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
	}

	/* Call machine specific resource fixup */
	if (ppc_md.pcibios_fixup_resources)
		ppc_md.pcibios_fixup_resources(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);

/* This function tries to figure out if a bridge resource has been initialized
 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
 * things go more smoothly when it gets it right. It should cover cases such
 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	struct pci_bus_region region;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		pcibios_resource_to_bus(dev->bus, &region, res);

		/* If the BAR is non-0 then it's probably been initialized */
		if (region.start != 0)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled on
		 * the bridge. If not, we consider it unassigned
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0. If any of the
		 * bridge resources covers that starting address (0), then it's
		 * good enough for us for memory space
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->mem_offset[i])
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide so we may as
		 * well consider it as unassigned. That covers the Apple case.
		 */
		return 1;
	} else {
		/* If the BAR is non-0, then we consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Here, we are a bit different than memory as typically IO
		 * space starting at low addresses -is- valid. What we do
		 * instead is that we consider as unassigned anything that
		 * doesn't have IO enabled in the PCI command register,
		 * and that's it.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* It's starting at 0 and IO is disabled in the bridge, consider
		 * it unassigned
		 */
		return 1;
	}
}

/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags)
			continue;
		if (i >= 3 && bus->self->transparent)
			continue;

		/* If we're going to reassign everything, we can
		 * shrink the P2P resource to have size as being
		 * of 0 in order to save space.
		 */
		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = -1;
			continue;
		}

		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);

		/* Try to detect uninitialized P2P bridge resources,
		 * and clear them out so they get re-assigned later
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s            (unassigned)\n", pci_name(dev));
		}
	}
}

void pcibios_setup_bus_self(struct pci_bus *bus)
{
	struct pci_controller *phb;

	/* Fix up the bus resources for P2P bridges */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);

	/* Platform specific bus fixups. This is currently only used
	 * by fsl_pci and I'm hoping to get rid of it at some point
	 */
	if (ppc_md.pcibios_fixup_bus)
		ppc_md.pcibios_fixup_bus(bus);

	/* Setup bus DMA mappings */
	phb = pci_bus_to_host(bus);
	if (phb->controller_ops.dma_bus_setup)
		phb->controller_ops.dma_bus_setup(bus);
}

void pcibios_bus_add_device(struct pci_dev *dev)
{
	struct pci_controller *phb;
	/* Fixup NUMA node as it may not be setup yet by the generic
	 * code and is needed by the DMA init
	 */
	set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

	/* Hook up default DMA ops */
	set_dma_ops(&dev->dev, pci_dma_ops);
	dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET;

	/* Additional platform DMA/iommu setup */
	phb = pci_bus_to_host(dev->bus);
	if (phb->controller_ops.dma_dev_setup)
		phb->controller_ops.dma_dev_setup(dev);

	/* Read default IRQs and fixup if necessary */
	pci_read_irq_line(dev);
	if (ppc_md.pci_irq_fixup)
		ppc_md.pci_irq_fixup(dev);

	if (ppc_md.pcibios_bus_add_device)
		ppc_md.pcibios_bus_add_device(dev);
}

int pcibios_add_device(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (ppc_md.pcibios_fixup_sriov)
		ppc_md.pcibios_fixup_sriov(dev);
#endif /* CONFIG_PCI_IOV */

	return 0;
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

void pcibios_fixup_bus(struct pci_bus *bus)
{
	/* When called from the generic PCI probe, read PCI<->PCI bridge
	 * bases. This is -not- called when generating the PCI tree from
	 * the OF device-tree.
	 */
	pci_read_bridge_bases(bus);

	/* Now fixup the bus itself */
	pcibios_setup_bus_self(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

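/*
 * ISA alignment can be skipped when the platform allows it and the
 * parent bridge does not forward ISA cycles (PCI_BRIDGE_CTL_ISA
 * clear), since the 0x100-0x3ff aliasing problem described below
 * cannot occur behind such a bridge.
 */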
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
	if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
	    !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
		return 1;
	return 0;
}

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff..
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Reparent resource children of parent that conflict with res
 * under res, and make res replace those children.
 */
static int reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

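	/*
	 * Walk the parent's child list: children entirely below res are
	 * skipped, the walk stops at the first child entirely above it,
	 * and any overlapping child must be fully contained in res or we
	 * give up.  firstpp remembers the link that points at the first
	 * contained child.
	 */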
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
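	/* res now owns the spliced-out children; re-point them at it. */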
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s %pR under %s\n",
			 p->name, p, res->name);
	}
	return 0;
}

1105*4882a593Smuzhiyun /*
1106*4882a593Smuzhiyun  *  Handle resources of PCI devices.  If the world were perfect, we could
1107*4882a593Smuzhiyun  *  just allocate all the resource regions and do nothing more.  It isn't.
1108*4882a593Smuzhiyun  *  On the other hand, we cannot just re-allocate all devices, as it would
1109*4882a593Smuzhiyun  *  require us to know lots of host bridge internals.  So we attempt to
1110*4882a593Smuzhiyun  *  keep as much of the original configuration as possible, but tweak it
1111*4882a593Smuzhiyun  *  when it's found to be wrong.
1112*4882a593Smuzhiyun  *
1113*4882a593Smuzhiyun  *  Known BIOS problems we have to work around:
1114*4882a593Smuzhiyun  *	- I/O or memory regions not configured
1115*4882a593Smuzhiyun  *	- regions configured, but not enabled in the command register
1116*4882a593Smuzhiyun  *	- bogus I/O addresses above 64K used
1117*4882a593Smuzhiyun  *	- expansion ROMs left enabled (this may sound harmless, but given
1118*4882a593Smuzhiyun  *	  the fact the PCI specs explicitly allow address decoders to be
1119*4882a593Smuzhiyun  *	  shared between expansion ROMs and other resource regions, it's
1120*4882a593Smuzhiyun  *	  at least dangerous)
1121*4882a593Smuzhiyun  *
1122*4882a593Smuzhiyun  *  Our solution:
1123*4882a593Smuzhiyun  *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
1124*4882a593Smuzhiyun  *	    This gives us fixed barriers on where we can allocate.
1125*4882a593Smuzhiyun  *	(2) Allocate resources for all enabled devices.  If there is
1126*4882a593Smuzhiyun  *	    a collision, just mark the resource as unallocated. Also
1127*4882a593Smuzhiyun  *	    disable expansion ROMs during this step.
1128*4882a593Smuzhiyun  *	(3) Try to allocate resources for disabled devices.  If the
1129*4882a593Smuzhiyun  *	    resources were assigned correctly, everything goes well,
1130*4882a593Smuzhiyun  *	    if they weren't, they won't disturb allocation of other
1131*4882a593Smuzhiyun  *	    resources.
1132*4882a593Smuzhiyun  *	(4) Assign new addresses to resources which were either
1133*4882a593Smuzhiyun  *	    not configured at all or misconfigured.  If explicitly
1134*4882a593Smuzhiyun  *	    requested by the user, configure expansion ROM address
1135*4882a593Smuzhiyun  *	    as well.
1136*4882a593Smuzhiyun  */
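
/*
 * In this file, the steps above map onto the functions below, driven by
 * pcibios_resource_survey(): step (1) is pcibios_allocate_bus_resources(),
 * steps (2) and (3) are pcibios_allocate_resources() passes 0 and 1, and
 * step (4) is handled by the generic pci_assign_unassigned_resources().
 */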
1137*4882a593Smuzhiyun 
pcibios_allocate_bus_resources(struct pci_bus * bus)1138*4882a593Smuzhiyun static void pcibios_allocate_bus_resources(struct pci_bus *bus)
1139*4882a593Smuzhiyun {
1140*4882a593Smuzhiyun 	struct pci_bus *b;
1141*4882a593Smuzhiyun 	int i;
1142*4882a593Smuzhiyun 	struct resource *res, *pr;
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun 	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1145*4882a593Smuzhiyun 		 pci_domain_nr(bus), bus->number);
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	pci_bus_for_each_resource(bus, res, i) {
1148*4882a593Smuzhiyun 		if (!res || !res->flags || res->start > res->end || res->parent)
1149*4882a593Smuzhiyun 			continue;
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 		/* If the resource was left unset at this point, we clear it */
1152*4882a593Smuzhiyun 		if (res->flags & IORESOURCE_UNSET)
1153*4882a593Smuzhiyun 			goto clear_resource;
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 		if (bus->parent == NULL)
1156*4882a593Smuzhiyun 			pr = (res->flags & IORESOURCE_IO) ?
1157*4882a593Smuzhiyun 				&ioport_resource : &iomem_resource;
1158*4882a593Smuzhiyun 		else {
1159*4882a593Smuzhiyun 			pr = pci_find_parent_resource(bus->self, res);
1160*4882a593Smuzhiyun 			if (pr == res) {
1161*4882a593Smuzhiyun 				/* this happens when the generic PCI
1162*4882a593Smuzhiyun 				 * code (wrongly) decides that this
1163*4882a593Smuzhiyun 				 * bridge is transparent  -- paulus
1164*4882a593Smuzhiyun 				 */
1165*4882a593Smuzhiyun 				continue;
1166*4882a593Smuzhiyun 			}
1167*4882a593Smuzhiyun 		}
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
1170*4882a593Smuzhiyun 			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
1171*4882a593Smuzhiyun 			 i, res, pr, (pr && pr->name) ? pr->name : "nil");
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1174*4882a593Smuzhiyun 			struct pci_dev *dev = bus->self;
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 			if (request_resource(pr, res) == 0)
1177*4882a593Smuzhiyun 				continue;
1178*4882a593Smuzhiyun 			/*
1179*4882a593Smuzhiyun 			 * Must be a conflict with an existing entry.
1180*4882a593Smuzhiyun 			 * Move that entry (or entries) under the
1181*4882a593Smuzhiyun 			 * bridge resource and try again.
1182*4882a593Smuzhiyun 			 */
1183*4882a593Smuzhiyun 			if (reparent_resources(pr, res) == 0)
1184*4882a593Smuzhiyun 				continue;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 			if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
1187*4882a593Smuzhiyun 			    pci_claim_bridge_resource(dev,
1188*4882a593Smuzhiyun 						i + PCI_BRIDGE_RESOURCES) == 0)
1189*4882a593Smuzhiyun 				continue;
1190*4882a593Smuzhiyun 		}
1191*4882a593Smuzhiyun 		pr_warn("PCI: Cannot allocate resource region %d of PCI bridge %d, will remap\n",
1192*4882a593Smuzhiyun 			i, bus->number);
1193*4882a593Smuzhiyun 	clear_resource:
1194*4882a593Smuzhiyun 		/* The resource may be worked out later, during
1195*4882a593Smuzhiyun 		 * reassignment, from the resources required by the
1196*4882a593Smuzhiyun 		 * downstream PCI devices.  Clear it to zero size here
1197*4882a593Smuzhiyun 		 * so that it does not claim any address space.
1199*4882a593Smuzhiyun 		 */
1200*4882a593Smuzhiyun 		res->start = 0;
1201*4882a593Smuzhiyun 		res->end = -1;
1202*4882a593Smuzhiyun 		res->flags = 0;
1203*4882a593Smuzhiyun 	}
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	list_for_each_entry(b, &bus->children, node)
1206*4882a593Smuzhiyun 		pcibios_allocate_bus_resources(b);
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun 
alloc_resource(struct pci_dev * dev,int idx)1209*4882a593Smuzhiyun static inline void alloc_resource(struct pci_dev *dev, int idx)
1210*4882a593Smuzhiyun {
1211*4882a593Smuzhiyun 	struct resource *pr, *r = &dev->resource[idx];
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
1214*4882a593Smuzhiyun 		 pci_name(dev), idx, r);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	pr = pci_find_parent_resource(dev, r);
1217*4882a593Smuzhiyun 	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1218*4882a593Smuzhiyun 	    request_resource(pr, r) < 0) {
1219*4882a593Smuzhiyun 		pr_warn("PCI: Cannot allocate resource region %d of device %s, will remap\n",
1220*4882a593Smuzhiyun 			idx, pci_name(dev));
1221*4882a593Smuzhiyun 		if (pr)
1222*4882a593Smuzhiyun 			pr_debug("PCI:  parent is %p: %pR\n", pr, pr);
1223*4882a593Smuzhiyun 		/* We'll assign a new address later */
1224*4882a593Smuzhiyun 		r->flags |= IORESOURCE_UNSET;
1225*4882a593Smuzhiyun 		r->end -= r->start;
1226*4882a593Smuzhiyun 		r->start = 0;
1227*4882a593Smuzhiyun 	}
1228*4882a593Smuzhiyun }
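
/*
 * Numerical sketch of the failure path above (hypothetical BAR): a memory
 * BAR at [0xc0000000-0xc0000fff] that cannot be claimed is converted into
 * a pure size holder so it can be reassigned later:
 *
 *	r->flags |= IORESOURCE_UNSET;
 *	r->end -= r->start;		r->end == 0xfff, the size is kept
 *	r->start = 0;			the stale address is discarded
 */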
1229*4882a593Smuzhiyun 
pcibios_allocate_resources(int pass)1230*4882a593Smuzhiyun static void __init pcibios_allocate_resources(int pass)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun 	struct pci_dev *dev = NULL;
1233*4882a593Smuzhiyun 	int idx, disabled;
1234*4882a593Smuzhiyun 	u16 command;
1235*4882a593Smuzhiyun 	struct resource *r;
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	for_each_pci_dev(dev) {
1238*4882a593Smuzhiyun 		pci_read_config_word(dev, PCI_COMMAND, &command);
1239*4882a593Smuzhiyun 		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1240*4882a593Smuzhiyun 			r = &dev->resource[idx];
1241*4882a593Smuzhiyun 			if (r->parent)		/* Already allocated */
1242*4882a593Smuzhiyun 				continue;
1243*4882a593Smuzhiyun 			if (!r->flags || (r->flags & IORESOURCE_UNSET))
1244*4882a593Smuzhiyun 				continue;	/* Not assigned at all */
1245*4882a593Smuzhiyun 			/* We only allocate ROMs on pass 1 just in case they
1246*4882a593Smuzhiyun 			 * have been screwed up by firmware
1247*4882a593Smuzhiyun 			 */
1248*4882a593Smuzhiyun 			if (idx == PCI_ROM_RESOURCE)
1249*4882a593Smuzhiyun 				disabled = 1;
1250*4882a593Smuzhiyun 			else if (r->flags & IORESOURCE_IO)
1251*4882a593Smuzhiyun 				disabled = !(command & PCI_COMMAND_IO);
1252*4882a593Smuzhiyun 			else
1253*4882a593Smuzhiyun 				disabled = !(command & PCI_COMMAND_MEMORY);
1254*4882a593Smuzhiyun 			if (pass == disabled)
1255*4882a593Smuzhiyun 				alloc_resource(dev, idx);
1256*4882a593Smuzhiyun 		}
1257*4882a593Smuzhiyun 		if (pass)
1258*4882a593Smuzhiyun 			continue;
1259*4882a593Smuzhiyun 		r = &dev->resource[PCI_ROM_RESOURCE];
1260*4882a593Smuzhiyun 		if (r->flags) {
1261*4882a593Smuzhiyun 			/* Turn the ROM off, leave the resource region,
1262*4882a593Smuzhiyun 			 * but keep it unregistered.
1263*4882a593Smuzhiyun 			 */
1264*4882a593Smuzhiyun 			u32 reg;
1265*4882a593Smuzhiyun 			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1266*4882a593Smuzhiyun 			if (reg & PCI_ROM_ADDRESS_ENABLE) {
1267*4882a593Smuzhiyun 				pr_debug("PCI: Switching off ROM of %s\n",
1268*4882a593Smuzhiyun 					 pci_name(dev));
1269*4882a593Smuzhiyun 				r->flags &= ~IORESOURCE_ROM_ENABLE;
1270*4882a593Smuzhiyun 				pci_write_config_dword(dev, dev->rom_base_reg,
1271*4882a593Smuzhiyun 						       reg & ~PCI_ROM_ADDRESS_ENABLE);
1272*4882a593Smuzhiyun 			}
1273*4882a593Smuzhiyun 		}
1274*4882a593Smuzhiyun 	}
1275*4882a593Smuzhiyun }
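
/*
 * How the two passes relate to the command register, assuming the
 * if/else chain above (illustrative table, not from the original code):
 *
 *	resource	command bit			disabled   claimed on pass
 *	I/O BAR		PCI_COMMAND_IO set		0	   0
 *	I/O BAR		PCI_COMMAND_IO clear		1	   1
 *	MEM BAR		PCI_COMMAND_MEMORY set		0	   0
 *	MEM BAR		PCI_COMMAND_MEMORY clear	1	   1
 *	ROM BAR		(command ignored)		1	   1
 *
 * So pass 0 claims the resources of enabled devices, and pass 1 picks up
 * disabled devices plus all expansion ROMs.
 */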
1276*4882a593Smuzhiyun 
pcibios_reserve_legacy_regions(struct pci_bus * bus)1277*4882a593Smuzhiyun static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun 	struct pci_controller *hose = pci_bus_to_host(bus);
1280*4882a593Smuzhiyun 	resource_size_t	offset;
1281*4882a593Smuzhiyun 	struct resource *res, *pres;
1282*4882a593Smuzhiyun 	int i;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	/* Check for IO */
1287*4882a593Smuzhiyun 	if (!(hose->io_resource.flags & IORESOURCE_IO))
1288*4882a593Smuzhiyun 		goto no_io;
1289*4882a593Smuzhiyun 	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1290*4882a593Smuzhiyun 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1291*4882a593Smuzhiyun 	BUG_ON(res == NULL);
1292*4882a593Smuzhiyun 	res->name = "Legacy IO";
1293*4882a593Smuzhiyun 	res->flags = IORESOURCE_IO;
1294*4882a593Smuzhiyun 	res->start = offset;
1295*4882a593Smuzhiyun 	res->end = (offset + 0xfff) & 0xfffffffful;
1296*4882a593Smuzhiyun 	pr_debug("Candidate legacy IO: %pR\n", res);
1297*4882a593Smuzhiyun 	if (request_resource(&hose->io_resource, res)) {
1298*4882a593Smuzhiyun 		printk(KERN_DEBUG
1299*4882a593Smuzhiyun 		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1300*4882a593Smuzhiyun 		       pci_domain_nr(bus), bus->number, res);
1301*4882a593Smuzhiyun 		kfree(res);
1302*4882a593Smuzhiyun 	}
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun  no_io:
1305*4882a593Smuzhiyun 	/* Check for memory */
1306*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
1307*4882a593Smuzhiyun 		pres = &hose->mem_resources[i];
1308*4882a593Smuzhiyun 		offset = hose->mem_offset[i];
1309*4882a593Smuzhiyun 		if (!(pres->flags & IORESOURCE_MEM))
1310*4882a593Smuzhiyun 			continue;
1311*4882a593Smuzhiyun 		pr_debug("hose mem res: %pR\n", pres);
1312*4882a593Smuzhiyun 		if ((pres->start - offset) <= 0xa0000 &&
1313*4882a593Smuzhiyun 		    (pres->end - offset) >= 0xbffff)
1314*4882a593Smuzhiyun 			break;
1315*4882a593Smuzhiyun 	}
1316*4882a593Smuzhiyun 	if (i >= 3)
1317*4882a593Smuzhiyun 		return;
1318*4882a593Smuzhiyun 	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1319*4882a593Smuzhiyun 	BUG_ON(res == NULL);
1320*4882a593Smuzhiyun 	res->name = "Legacy VGA memory";
1321*4882a593Smuzhiyun 	res->flags = IORESOURCE_MEM;
1322*4882a593Smuzhiyun 	res->start = 0xa0000 + offset;
1323*4882a593Smuzhiyun 	res->end = 0xbffff + offset;
1324*4882a593Smuzhiyun 	pr_debug("Candidate VGA memory: %pR\n", res);
1325*4882a593Smuzhiyun 	if (request_resource(pres, res)) {
1326*4882a593Smuzhiyun 		printk(KERN_DEBUG
1327*4882a593Smuzhiyun 		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1328*4882a593Smuzhiyun 		       pci_domain_nr(bus), bus->number, res);
1329*4882a593Smuzhiyun 		kfree(res);
1330*4882a593Smuzhiyun 	}
1331*4882a593Smuzhiyun }
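
/*
 * Example of the reservation above, with a hypothetical PHB whose memory
 * window starts at PCI bus address 0 and whose mem_offset is 0x80000000:
 * the "Legacy VGA memory" resource then spans CPU addresses
 * 0x800a0000-0x800bffff, i.e. the ISA frame buffer hole 0xa0000-0xbffff
 * as seen from the bus, so later BAR assignment cannot land on top of it.
 */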
1332*4882a593Smuzhiyun 
pcibios_resource_survey(void)1333*4882a593Smuzhiyun void __init pcibios_resource_survey(void)
1334*4882a593Smuzhiyun {
1335*4882a593Smuzhiyun 	struct pci_bus *b;
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	/* Allocate and assign resources */
1338*4882a593Smuzhiyun 	list_for_each_entry(b, &pci_root_buses, node)
1339*4882a593Smuzhiyun 		pcibios_allocate_bus_resources(b);
1340*4882a593Smuzhiyun 	if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
1341*4882a593Smuzhiyun 		pcibios_allocate_resources(0);
1342*4882a593Smuzhiyun 		pcibios_allocate_resources(1);
1343*4882a593Smuzhiyun 	}
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun 	/* Before we start assigning unassigned resources, we try to reserve
1346*4882a593Smuzhiyun 	 * the low IO area and the VGA memory area if they intersect the
1347*4882a593Smuzhiyun 	 * bus available resources to avoid allocating things on top of them
1348*4882a593Smuzhiyun 	 */
1349*4882a593Smuzhiyun 	if (!pci_has_flag(PCI_PROBE_ONLY)) {
1350*4882a593Smuzhiyun 		list_for_each_entry(b, &pci_root_buses, node)
1351*4882a593Smuzhiyun 			pcibios_reserve_legacy_regions(b);
1352*4882a593Smuzhiyun 	}
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	/* Now, if the platform didn't decide to blindly trust the firmware,
1355*4882a593Smuzhiyun 	 * we proceed to assigning things that were left unassigned
1356*4882a593Smuzhiyun 	 */
1357*4882a593Smuzhiyun 	if (!pci_has_flag(PCI_PROBE_ONLY)) {
1358*4882a593Smuzhiyun 		pr_debug("PCI: Assigning unassigned resources...\n");
1359*4882a593Smuzhiyun 		pci_assign_unassigned_resources();
1360*4882a593Smuzhiyun 	}
1361*4882a593Smuzhiyun }
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun /* This is used by the PCI hotplug driver to allocate resources
1364*4882a593Smuzhiyun  * of newly plugged buses. We can try to consolidate with the
1365*4882a593Smuzhiyun  * rest of the code later, for now, keep it as-is as our main
1366*4882a593Smuzhiyun  * resource allocation function doesn't deal with sub-trees yet.
1367*4882a593Smuzhiyun  */
pcibios_claim_one_bus(struct pci_bus * bus)1368*4882a593Smuzhiyun void pcibios_claim_one_bus(struct pci_bus *bus)
1369*4882a593Smuzhiyun {
1370*4882a593Smuzhiyun 	struct pci_dev *dev;
1371*4882a593Smuzhiyun 	struct pci_bus *child_bus;
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun 	list_for_each_entry(dev, &bus->devices, bus_list) {
1374*4882a593Smuzhiyun 		int i;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1377*4882a593Smuzhiyun 			struct resource *r = &dev->resource[i];
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 			if (r->parent || !r->start || !r->flags)
1380*4882a593Smuzhiyun 				continue;
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
1383*4882a593Smuzhiyun 				 pci_name(dev), i, r);
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 			if (pci_claim_resource(dev, i) == 0)
1386*4882a593Smuzhiyun 				continue;
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 			pci_claim_bridge_resource(dev, i);
1389*4882a593Smuzhiyun 		}
1390*4882a593Smuzhiyun 	}
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 	list_for_each_entry(child_bus, &bus->children, node)
1393*4882a593Smuzhiyun 		pcibios_claim_one_bus(child_bus);
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun /* pcibios_finish_adding_to_bus
1399*4882a593Smuzhiyun  *
1400*4882a593Smuzhiyun  * This is to be called by the hotplug code after devices have been
1401*4882a593Smuzhiyun  * added to a bus; this includes calling it for a PHB that is just
1402*4882a593Smuzhiyun  * being added.
1403*4882a593Smuzhiyun  */
pcibios_finish_adding_to_bus(struct pci_bus * bus)1404*4882a593Smuzhiyun void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1405*4882a593Smuzhiyun {
1406*4882a593Smuzhiyun 	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
1407*4882a593Smuzhiyun 		 pci_domain_nr(bus), bus->number);
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	/* Allocate bus and devices resources */
1410*4882a593Smuzhiyun 	pcibios_allocate_bus_resources(bus);
1411*4882a593Smuzhiyun 	pcibios_claim_one_bus(bus);
1412*4882a593Smuzhiyun 	if (!pci_has_flag(PCI_PROBE_ONLY)) {
1413*4882a593Smuzhiyun 		if (bus->self)
1414*4882a593Smuzhiyun 			pci_assign_unassigned_bridge_resources(bus->self);
1415*4882a593Smuzhiyun 		else
1416*4882a593Smuzhiyun 			pci_assign_unassigned_bus_resources(bus);
1417*4882a593Smuzhiyun 	}
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	/* Add new devices to global lists.  Register in proc, sysfs. */
1420*4882a593Smuzhiyun 	pci_bus_add_devices(bus);
1421*4882a593Smuzhiyun }
1422*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1423*4882a593Smuzhiyun 
pcibios_enable_device(struct pci_dev * dev,int mask)1424*4882a593Smuzhiyun int pcibios_enable_device(struct pci_dev *dev, int mask)
1425*4882a593Smuzhiyun {
1426*4882a593Smuzhiyun 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	if (phb->controller_ops.enable_device_hook)
1429*4882a593Smuzhiyun 		if (!phb->controller_ops.enable_device_hook(dev))
1430*4882a593Smuzhiyun 			return -EINVAL;
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	return pci_enable_resources(dev, mask);
1433*4882a593Smuzhiyun }
1434*4882a593Smuzhiyun 
pcibios_disable_device(struct pci_dev * dev)1435*4882a593Smuzhiyun void pcibios_disable_device(struct pci_dev *dev)
1436*4882a593Smuzhiyun {
1437*4882a593Smuzhiyun 	struct pci_controller *phb = pci_bus_to_host(dev->bus);
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	if (phb->controller_ops.disable_device)
1440*4882a593Smuzhiyun 		phb->controller_ops.disable_device(dev);
1441*4882a593Smuzhiyun }
1442*4882a593Smuzhiyun 
pcibios_io_space_offset(struct pci_controller * hose)1443*4882a593Smuzhiyun resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
1444*4882a593Smuzhiyun {
1445*4882a593Smuzhiyun 	return (unsigned long) hose->io_base_virt - _IO_BASE;
1446*4882a593Smuzhiyun }
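
/*
 * Sketch of what this offset means, with hypothetical numbers: if the
 * PHB's I/O window is mapped at io_base_virt == _IO_BASE + 0x10000, the
 * offset is 0x10000 and a device BAR at bus port 0x100 is presented to
 * the generic PCI code as port 0x10100, which inb()/outb() translate
 * back through _IO_BASE to the right virtual address.
 */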
1447*4882a593Smuzhiyun 
pcibios_setup_phb_resources(struct pci_controller * hose,struct list_head * resources)1448*4882a593Smuzhiyun static void pcibios_setup_phb_resources(struct pci_controller *hose,
1449*4882a593Smuzhiyun 					struct list_head *resources)
1450*4882a593Smuzhiyun {
1451*4882a593Smuzhiyun 	struct resource *res;
1452*4882a593Smuzhiyun 	resource_size_t offset;
1453*4882a593Smuzhiyun 	int i;
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	/* Hookup PHB IO resource */
1456*4882a593Smuzhiyun 	res = &hose->io_resource;
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	if (!res->flags) {
1459*4882a593Smuzhiyun 		pr_debug("PCI: I/O resource not set for host bridge %pOF (domain %d)\n",
1460*4882a593Smuzhiyun 			 hose->dn, hose->global_number);
1462*4882a593Smuzhiyun 	} else {
1463*4882a593Smuzhiyun 		offset = pcibios_io_space_offset(hose);
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 		pr_debug("PCI: PHB IO resource    = %pR off 0x%08llx\n",
1466*4882a593Smuzhiyun 			 res, (unsigned long long)offset);
1467*4882a593Smuzhiyun 		pci_add_resource_offset(resources, res, offset);
1468*4882a593Smuzhiyun 	}
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 	/* Hookup PHB Memory resources */
1471*4882a593Smuzhiyun 	for (i = 0; i < 3; ++i) {
1472*4882a593Smuzhiyun 		res = &hose->mem_resources[i];
1473*4882a593Smuzhiyun 		if (!res->flags)
1474*4882a593Smuzhiyun 			continue;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 		offset = hose->mem_offset[i];
1477*4882a593Smuzhiyun 		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
1478*4882a593Smuzhiyun 			 res, (unsigned long long)offset);
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 		pci_add_resource_offset(resources, res, offset);
1481*4882a593Smuzhiyun 	}
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun /*
1485*4882a593Smuzhiyun  * Null PCI config access functions, for the case when we can't
1486*4882a593Smuzhiyun  * find a hose.
1487*4882a593Smuzhiyun  */
1488*4882a593Smuzhiyun #define NULL_PCI_OP(rw, size, type)					\
1489*4882a593Smuzhiyun static int								\
1490*4882a593Smuzhiyun null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
1491*4882a593Smuzhiyun {									\
1492*4882a593Smuzhiyun 	return PCIBIOS_DEVICE_NOT_FOUND;    				\
1493*4882a593Smuzhiyun }
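
/*
 * For reference, NULL_PCI_OP(read, byte, u8 *) would expand to:
 *
 *	static int
 *	null_read_config_byte(struct pci_dev *dev, int offset, u8 * val)
 *	{
 *		return PCIBIOS_DEVICE_NOT_FOUND;
 *	}
 *
 * No expansion is instantiated here; null_pci_ops below is built from
 * the bus-level handlers instead.
 */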
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun static int
null_read_config(struct pci_bus * bus,unsigned int devfn,int offset,int len,u32 * val)1496*4882a593Smuzhiyun null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1497*4882a593Smuzhiyun 		 int len, u32 *val)
1498*4882a593Smuzhiyun {
1499*4882a593Smuzhiyun 	return PCIBIOS_DEVICE_NOT_FOUND;
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun static int
null_write_config(struct pci_bus * bus,unsigned int devfn,int offset,int len,u32 val)1503*4882a593Smuzhiyun null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1504*4882a593Smuzhiyun 		  int len, u32 val)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun 	return PCIBIOS_DEVICE_NOT_FOUND;
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun static struct pci_ops null_pci_ops =
1510*4882a593Smuzhiyun {
1511*4882a593Smuzhiyun 	.read = null_read_config,
1512*4882a593Smuzhiyun 	.write = null_write_config,
1513*4882a593Smuzhiyun };
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun /*
1516*4882a593Smuzhiyun  * These functions are used early on before PCI scanning is done
1517*4882a593Smuzhiyun  * and all of the pci_dev and pci_bus structures have been created.
1518*4882a593Smuzhiyun  */
1519*4882a593Smuzhiyun static struct pci_bus *
fake_pci_bus(struct pci_controller * hose,int busnr)1520*4882a593Smuzhiyun fake_pci_bus(struct pci_controller *hose, int busnr)
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun 	static struct pci_bus bus;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	if (hose == NULL) {
1525*4882a593Smuzhiyun 		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1526*4882a593Smuzhiyun 	}
1527*4882a593Smuzhiyun 	bus.number = busnr;
1528*4882a593Smuzhiyun 	bus.sysdata = hose;
1529*4882a593Smuzhiyun 	bus.ops = hose ? hose->ops : &null_pci_ops;
1530*4882a593Smuzhiyun 	return &bus;
1531*4882a593Smuzhiyun }
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun #define EARLY_PCI_OP(rw, size, type)					\
1534*4882a593Smuzhiyun int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
1535*4882a593Smuzhiyun 			       int devfn, int offset, type value)	\
1536*4882a593Smuzhiyun {									\
1537*4882a593Smuzhiyun 	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
1538*4882a593Smuzhiyun 					    devfn, offset, value);	\
1539*4882a593Smuzhiyun }
1540*4882a593Smuzhiyun 
EARLY_PCI_OP(read,byte,u8 *)1541*4882a593Smuzhiyun EARLY_PCI_OP(read, byte, u8 *)
1542*4882a593Smuzhiyun EARLY_PCI_OP(read, word, u16 *)
1543*4882a593Smuzhiyun EARLY_PCI_OP(read, dword, u32 *)
1544*4882a593Smuzhiyun EARLY_PCI_OP(write, byte, u8)
1545*4882a593Smuzhiyun EARLY_PCI_OP(write, word, u16)
1546*4882a593Smuzhiyun EARLY_PCI_OP(write, dword, u32)
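
/*
 * Typical early-boot usage (hypothetical caller), before any pci_dev or
 * pci_bus structures exist:
 *
 *	u16 vendor;
 *
 *	if (early_read_config_word(hose, 0, PCI_DEVFN(0, 0),
 *				   PCI_VENDOR_ID, &vendor) == PCIBIOS_SUCCESSFUL)
 *		pr_debug("PCI: device 00:00.0 vendor is %04x\n", vendor);
 */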
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun int early_find_capability(struct pci_controller *hose, int bus, int devfn,
1549*4882a593Smuzhiyun 			  int cap)
1550*4882a593Smuzhiyun {
1551*4882a593Smuzhiyun 	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
1552*4882a593Smuzhiyun }
1553*4882a593Smuzhiyun 
pcibios_get_phb_of_node(struct pci_bus * bus)1554*4882a593Smuzhiyun struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1555*4882a593Smuzhiyun {
1556*4882a593Smuzhiyun 	struct pci_controller *hose = bus->sysdata;
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 	return of_node_get(hose->dn);
1559*4882a593Smuzhiyun }
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun /**
1562*4882a593Smuzhiyun  * pcibios_scan_phb - Given a pci_controller, set up and scan the PCI bus
1563*4882a593Smuzhiyun  * @hose: Pointer to the PCI host controller instance structure
1564*4882a593Smuzhiyun  */
pcibios_scan_phb(struct pci_controller * hose)1565*4882a593Smuzhiyun void pcibios_scan_phb(struct pci_controller *hose)
1566*4882a593Smuzhiyun {
1567*4882a593Smuzhiyun 	LIST_HEAD(resources);
1568*4882a593Smuzhiyun 	struct pci_bus *bus;
1569*4882a593Smuzhiyun 	struct device_node *node = hose->dn;
1570*4882a593Smuzhiyun 	int mode;
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun 	pr_debug("PCI: Scanning PHB %pOF\n", node);
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	/* Get some IO space for the new PHB */
1575*4882a593Smuzhiyun 	pcibios_setup_phb_io_space(hose);
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	/* Wire up PHB bus resources */
1578*4882a593Smuzhiyun 	pcibios_setup_phb_resources(hose, &resources);
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	hose->busn.start = hose->first_busno;
1581*4882a593Smuzhiyun 	hose->busn.end	 = hose->last_busno;
1582*4882a593Smuzhiyun 	hose->busn.flags = IORESOURCE_BUS;
1583*4882a593Smuzhiyun 	pci_add_resource(&resources, &hose->busn);
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	/* Create an empty bus for the toplevel */
1586*4882a593Smuzhiyun 	bus = pci_create_root_bus(hose->parent, hose->first_busno,
1587*4882a593Smuzhiyun 				  hose->ops, hose, &resources);
1588*4882a593Smuzhiyun 	if (bus == NULL) {
1589*4882a593Smuzhiyun 		pr_err("Failed to create bus for PCI domain %04x\n",
1590*4882a593Smuzhiyun 			hose->global_number);
1591*4882a593Smuzhiyun 		pci_free_resource_list(&resources);
1592*4882a593Smuzhiyun 		return;
1593*4882a593Smuzhiyun 	}
1594*4882a593Smuzhiyun 	hose->bus = bus;
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	/* Get probe mode and perform scan */
1597*4882a593Smuzhiyun 	mode = PCI_PROBE_NORMAL;
1598*4882a593Smuzhiyun 	if (node && hose->controller_ops.probe_mode)
1599*4882a593Smuzhiyun 		mode = hose->controller_ops.probe_mode(bus);
1600*4882a593Smuzhiyun 	pr_debug("    probe mode: %d\n", mode);
1601*4882a593Smuzhiyun 	if (mode == PCI_PROBE_DEVTREE)
1602*4882a593Smuzhiyun 		of_scan_bus(node, bus);
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	if (mode == PCI_PROBE_NORMAL) {
1605*4882a593Smuzhiyun 		pci_bus_update_busn_res_end(bus, 255);
1606*4882a593Smuzhiyun 		hose->last_busno = pci_scan_child_bus(bus);
1607*4882a593Smuzhiyun 		pci_bus_update_busn_res_end(bus, hose->last_busno);
1608*4882a593Smuzhiyun 	}
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	/* Platform gets a chance to do some global fixups before
1611*4882a593Smuzhiyun 	 * we proceed to resource allocation
1612*4882a593Smuzhiyun 	 */
1613*4882a593Smuzhiyun 	if (ppc_md.pcibios_fixup_phb)
1614*4882a593Smuzhiyun 		ppc_md.pcibios_fixup_phb(hose);
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 	/* Configure PCI Express settings */
1617*4882a593Smuzhiyun 	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
1618*4882a593Smuzhiyun 		struct pci_bus *child;
1619*4882a593Smuzhiyun 		list_for_each_entry(child, &bus->children, node)
1620*4882a593Smuzhiyun 			pcie_bus_configure_settings(child);
1621*4882a593Smuzhiyun 	}
1622*4882a593Smuzhiyun }
1623*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pcibios_scan_phb);
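
/*
 * Example flow on a hypothetical platform with no probe_mode hook: the
 * scan runs as PCI_PROBE_NORMAL, so the bus number window is first
 * widened to 255, pci_scan_child_bus() discovers the tree through
 * config-space accesses, and the window is then shrunk back to the
 * highest bus number actually found.
 */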
1624*4882a593Smuzhiyun 
fixup_hide_host_resource_fsl(struct pci_dev * dev)1625*4882a593Smuzhiyun static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
1626*4882a593Smuzhiyun {
1627*4882a593Smuzhiyun 	int i, class = dev->class >> 8;
1628*4882a593Smuzhiyun 	/* When configured as an agent, the programming interface is 1 */
1629*4882a593Smuzhiyun 	int prog_if = dev->class & 0xf;
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
1632*4882a593Smuzhiyun 	     class == PCI_CLASS_BRIDGE_OTHER) &&
1633*4882a593Smuzhiyun 		(dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
1634*4882a593Smuzhiyun 		(prog_if == 0) &&
1635*4882a593Smuzhiyun 		(dev->bus->parent == NULL)) {
1636*4882a593Smuzhiyun 		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1637*4882a593Smuzhiyun 			dev->resource[i].start = 0;
1638*4882a593Smuzhiyun 			dev->resource[i].end = 0;
1639*4882a593Smuzhiyun 			dev->resource[i].flags = 0;
1640*4882a593Smuzhiyun 		}
1641*4882a593Smuzhiyun 	}
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1644*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 
discover_phbs(void)1647*4882a593Smuzhiyun static int __init discover_phbs(void)
1648*4882a593Smuzhiyun {
1649*4882a593Smuzhiyun 	if (ppc_md.discover_phbs)
1650*4882a593Smuzhiyun 		ppc_md.discover_phbs();
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	return 0;
1653*4882a593Smuzhiyun }
1654*4882a593Smuzhiyun core_initcall(discover_phbs);
1655