xref: /OK3568_Linux_fs/kernel/arch/sparc/kernel/pci.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* pci.c: UltraSparc PCI controller support.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
5*4882a593Smuzhiyun  * Copyright (C) 1998, 1999 Eddie C. Dost   (ecd@skynet.be)
6*4882a593Smuzhiyun  * Copyright (C) 1999 Jakub Jelinek   (jj@ultra.linux.cz)
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * OF tree based PCI bus probing taken from the PowerPC port
9*4882a593Smuzhiyun  * with minor modifications, see there for credits.
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/export.h>
13*4882a593Smuzhiyun #include <linux/kernel.h>
14*4882a593Smuzhiyun #include <linux/string.h>
15*4882a593Smuzhiyun #include <linux/sched.h>
16*4882a593Smuzhiyun #include <linux/capability.h>
17*4882a593Smuzhiyun #include <linux/errno.h>
18*4882a593Smuzhiyun #include <linux/pci.h>
19*4882a593Smuzhiyun #include <linux/msi.h>
20*4882a593Smuzhiyun #include <linux/irq.h>
21*4882a593Smuzhiyun #include <linux/init.h>
22*4882a593Smuzhiyun #include <linux/of.h>
23*4882a593Smuzhiyun #include <linux/of_device.h>
24*4882a593Smuzhiyun #include <linux/pgtable.h>
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #include <linux/uaccess.h>
27*4882a593Smuzhiyun #include <asm/irq.h>
28*4882a593Smuzhiyun #include <asm/prom.h>
29*4882a593Smuzhiyun #include <asm/apb.h>
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #include "pci_impl.h"
32*4882a593Smuzhiyun #include "kernel.h"
33*4882a593Smuzhiyun 
/* List of all PCI controllers found in the system. */
struct pci_pbm_info *pci_pbm_root = NULL;

/* Each PBM found gets a unique index. */
int pci_num_pbms = 0;

/* Config-space "poke" protocol state.  The accessors below set
 * pci_poke_in_progress/pci_poke_cpu around a physical bypass access and
 * clear pci_poke_faulted beforehand; the read accessors then discard the
 * result if pci_poke_faulted became non-zero.  Nothing in this file sets
 * pci_poke_faulted, so it is presumably raised by the trap/fault path
 * elsewhere in the arch code -- NOTE(review): confirm against the trap
 * handlers.  Declared volatile because they are modified asynchronously
 * to normal program flow.
 */
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;

/* Serializes the poke state and all pci_config_* accessors below. */
static DEFINE_SPINLOCK(pci_poke_lock);
45*4882a593Smuzhiyun 
/* Read one byte of PCI config space at physical address @addr.
 * On a faulting access, *@ret is left unmodified; callers get the
 * previous contents of their buffer instead of garbage.
 */
void pci_config_read8(u8 *addr, u8 *ret)
{
	unsigned long flags;
	u8 byte;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	/* Little-endian physical-bypass byte load, fenced with membar #Sync
	 * on both sides so any fault is attributable to exactly this access.
	 */
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduba [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (byte)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = byte;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
67*4882a593Smuzhiyun 
/* Read a 16-bit value of PCI config space at physical address @addr.
 * Same fault-poke protocol as pci_config_read8(): on fault, *@ret is
 * left unmodified.
 */
void pci_config_read16(u16 *addr, u16 *ret)
{
	unsigned long flags;
	u16 word;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	/* Little-endian physical-bypass halfword load, fenced both sides. */
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (word)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = word;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
89*4882a593Smuzhiyun 
/* Read a 32-bit value of PCI config space at physical address @addr.
 * Same fault-poke protocol as pci_config_read8(): on fault, *@ret is
 * left unmodified.
 */
void pci_config_read32(u32 *addr, u32 *ret)
{
	unsigned long flags;
	u32 dword;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	/* Little-endian physical-bypass word load, fenced both sides. */
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduwa [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (dword)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = dword;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
111*4882a593Smuzhiyun 
/* Write one byte of PCI config space at physical address @addr.
 * A faulting write is silently discarded (pci_poke_faulted is armed
 * but deliberately not checked here).
 */
void pci_config_write8(u8 *addr, u8 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	/* Little-endian physical-bypass byte store, fenced both sides. */
	__asm__ __volatile__("membar #Sync\n\t"
			     "stba %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
130*4882a593Smuzhiyun 
/* Write a 16-bit value of PCI config space at physical address @addr.
 * A faulting write is silently discarded, as in pci_config_write8().
 */
void pci_config_write16(u16 *addr, u16 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	/* Little-endian physical-bypass halfword store, fenced both sides. */
	__asm__ __volatile__("membar #Sync\n\t"
			     "stha %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
149*4882a593Smuzhiyun 
/* Write a 32-bit value of PCI config space at physical address @addr.
 * A faulting write is silently discarded, as in pci_config_write8().
 */
void pci_config_write32(u32 *addr, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	/* Little-endian physical-bypass word store, fenced both sides. */
	__asm__ __volatile__("membar #Sync\n\t"
			     "stwa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun static int ofpci_verbose;
170*4882a593Smuzhiyun 
ofpci_debug(char * str)171*4882a593Smuzhiyun static int __init ofpci_debug(char *str)
172*4882a593Smuzhiyun {
173*4882a593Smuzhiyun 	int val = 0;
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	get_option(&str, &val);
176*4882a593Smuzhiyun 	if (val)
177*4882a593Smuzhiyun 		ofpci_verbose = 1;
178*4882a593Smuzhiyun 	return 1;
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun __setup("ofpci_debug=", ofpci_debug);
182*4882a593Smuzhiyun 
/* Translate the high cell (phys.hi) of an OF PCI address into combined
 * IORESOURCE_* and PCI_BASE_ADDRESS_* flags.  Returns 0 for address
 * spaces we do not map here (bit 25 and bit 24 both clear).
 */
static unsigned long pci_parse_of_flags(u32 addr0)
{
	unsigned long res_flags;

	if (!(addr0 & 0x02000000)) {
		/* Not memory space; bit 24 alone marks I/O space. */
		if (addr0 & 0x01000000)
			return IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
		return 0;
	}

	res_flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
	res_flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
	if (addr0 & 0x01000000)
		res_flags |= IORESOURCE_MEM_64 | PCI_BASE_ADDRESS_MEM_TYPE_64;
	if (addr0 & 0x40000000)
		res_flags |= IORESOURCE_PREFETCH | PCI_BASE_ADDRESS_MEM_PREFETCH;
	return res_flags;
}
200*4882a593Smuzhiyun 
/* The of_device layer has translated all of the assigned-address properties
 * into physical address resources, we only have to figure out the register
 * mapping.
 *
 * Copies each translated resource from @op into the matching BAR or ROM
 * slot of @dev, with flags decoded from the property's phys.hi cell.
 */
static void pci_parse_of_addrs(struct platform_device *op,
			       struct device_node *node,
			       struct pci_dev *dev)
{
	struct resource *op_res;
	const u32 *addrs;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	if (ofpci_verbose)
		pci_info(dev, "    parse addresses (%d bytes) @ %p\n",
			 proplen, addrs);
	op_res = &op->resource[0];
	/* Each entry is 5 cells (20 bytes): phys.hi phys.mid phys.lo
	 * size.hi size.lo; op->resource[] runs in lock-step with them.
	 */
	for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
		struct resource *res;
		unsigned long flags;
		int i;

		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		/* Low byte of phys.hi is the config-space register offset. */
		i = addrs[0] & 0xff;
		if (ofpci_verbose)
			pci_info(dev, "  start: %llx, end: %llx, i: %x\n",
				 op_res->start, op_res->end, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			/* BAR registers are 4 bytes apart. */
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		} else {
			pci_err(dev, "bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = op_res->start;
		res->end = op_res->end;
		res->flags = flags;
		res->name = pci_name(dev);

		pci_info(dev, "reg 0x%x: %pR\n", i, res);
	}
}
250*4882a593Smuzhiyun 
/* Fill in sparc64 per-device archdata: the IOMMU and streaming cache
 * this device maps DMA through, the owning host controller (PBM), the
 * underlying OF platform device, and the controller's NUMA node.
 */
static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
				  void *stc, void *host_controller,
				  struct platform_device  *op,
				  int numa_node)
{
	sd->iommu = iommu;
	sd->stc = stc;
	sd->host_controller = host_controller;
	sd->op = op;
	sd->numa_node = numa_node;
}
262*4882a593Smuzhiyun 
/* Build a struct pci_dev for OF device @node, sitting at @devfn on
 * @bus under controller @pbm, entirely from device-tree properties
 * (plus one live config-space read for the class/revision word).
 * Returns the new device, already added to the bus, or NULL on
 * allocation failure.
 */
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
					 struct device_node *node,
					 struct pci_bus *bus, int devfn)
{
	struct dev_archdata *sd;
	struct platform_device *op;
	struct pci_dev *dev;
	u32 class;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	/* Mirror the controller's archdata into both the pci_dev and the
	 * matching platform device so either handle can be used for DMA.
	 */
	op = of_find_device_by_node(node);
	sd = &dev->dev.archdata;
	pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
			      pbm->numa_node);
	sd = &op->dev.archdata;
	sd->iommu = pbm->iommu;
	sd->stc = &pbm->stc;
	sd->numa_node = pbm->numa_node;

	/* EBUS children inherit this archdata from their parent. */
	if (of_node_name_eq(node, "ebus"))
		of_propagate_archdata(op);

	if (ofpci_verbose)
		pci_info(bus,"    create device, devfn: %x, type: %s\n",
			 devfn, of_node_get_device_type(node));

	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->dev.of_node = of_node_get(node);
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* IDs come straight from the firmware properties. */
	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
	dev->device = of_getintprop_default(node, "device-id", 0xffff);
	dev->subsystem_vendor =
		of_getintprop_default(node, "subsystem-vendor-id", 0);
	dev->subsystem_device =
		of_getintprop_default(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	/* We can't actually use the firmware value, we have
	 * to read what is in the register right now.  One
	 * reason is that in the case of IDE interfaces the
	 * firmware can sample the value before the IDE
	 * interface is programmed into native mode.
	 */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->class = class >> 8;
	dev->revision = class & 0xff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));

	/* I have seen IDE devices which will not respond to
	 * the bmdma simplex check reads if bus mastering is
	 * disabled.
	 */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
		pci_set_master(dev);

	dev->current_state = PCI_UNKNOWN;	/* unknown power state */
	dev->error_state = pci_channel_io_normal;
	dev->dma_mask = 0xffffffff;

	/* Header type and ROM BAR location depend on the node flavor. */
	if (of_node_name_eq(node, "pci")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (of_node_is_type(node, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;

		/* sd still points at op->dev.archdata here, so this is
		 * the platform device's first IRQ resource.
		 */
		dev->irq = sd->op->archdata.irqs[0];
		if (dev->irq == 0xffffffff)
			dev->irq = PCI_IRQ_NONE;
	}

	pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
		 dev->vendor, dev->device, dev->hdr_type, dev->class);

	pci_parse_of_addrs(sd->op, node, dev);

	if (ofpci_verbose)
		pci_info(dev, "    adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
361*4882a593Smuzhiyun 
/* Find the lowest (*first_p) and highest (*last_p) set bit positions
 * in the APB address-map byte @map.  An all-zero map yields the
 * inverted pair first=8, last=0, denoting an empty range.
 */
static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
{
	u32 lo = 8, hi = 0, bit;

	for (bit = 0; bit < 8; bit++) {
		if (!(map & (1 << bit)))
			continue;
		if (bit < lo)
			lo = bit;
		if (bit > hi)
			hi = bit;
	}

	*first_p = lo;
	*last_p = hi;
}
380*4882a593Smuzhiyun 
/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
 * a proper 'ranges' property.
 *
 * The APB address-map registers are bitmaps of which 2MB I/O chunks
 * (IO map) and 512MB memory chunks (MEM map) the bridge decodes; the
 * first/last set bits bound the window.
 */
static void apb_fake_ranges(struct pci_dev *dev,
			    struct pci_bus *bus,
			    struct pci_pbm_info *pbm)
{
	struct pci_bus_region region;
	struct resource *res;
	u32 first, last;
	u8 map;

	/* I/O window: each map bit covers 2MB (1 << 21). */
	pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
	apb_calc_first_last(map, &first, &last);
	res = bus->resource[0];
	res->flags = IORESOURCE_IO;
	region.start = (first << 21);
	region.end = (last << 21) + ((1 << 21) - 1);
	pcibios_bus_to_resource(dev->bus, res, &region);

	/* Memory window: each map bit covers 512MB (1 << 29). */
	pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
	apb_calc_first_last(map, &first, &last);
	res = bus->resource[1];
	res->flags = IORESOURCE_MEM;
	region.start = (first << 29);
	region.end = (last << 29) + ((1 << 29) - 1);
	pcibios_bus_to_resource(dev->bus, res, &region);
}
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun static void pci_of_scan_bus(struct pci_pbm_info *pbm,
411*4882a593Smuzhiyun 			    struct device_node *node,
412*4882a593Smuzhiyun 			    struct pci_bus *bus);
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun #define GET_64BIT(prop, i)	((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
415*4882a593Smuzhiyun 
/* Create and populate the secondary pci_bus behind bridge device @dev
 * from its OF @node: parse bus-range, set up bridge windows from the
 * 'ranges' property (or fake them for Simba bridges / read them from
 * hardware when 'ranges' is absent), then recursively scan the children.
 */
static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
			       struct device_node *node,
			       struct pci_dev *dev)
{
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, simba;
	struct pci_bus_region region;
	struct resource *res;
	unsigned int flags;
	u64 size;

	if (ofpci_verbose)
		pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node);

	/* parse bus-range property (exactly two u32 cells expected) */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		pci_info(dev, "Can't get bus-range for PCI-PCI bridge %pOF\n",
		       node);
		return;
	}

	if (ofpci_verbose)
		pci_info(dev, "    Bridge bus range [%u --> %u]\n",
			 busrange[0], busrange[1]);

	/* A missing 'ranges' on a SUNW,simba bridge means we must fake
	 * the windows from the APB address-map registers instead.
	 */
	ranges = of_get_property(node, "ranges", &len);
	simba = 0;
	if (ranges == NULL) {
		const char *model = of_get_property(node, "model", NULL);
		if (model && !strcmp(model, "SUNW,simba"))
			simba = 1;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		pci_err(dev, "Failed to create pci bus for %pOF\n",
			node);
		return;
	}

	bus->primary = dev->bus->number;
	pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
	bus->bridge_ctl = 0;

	if (ofpci_verbose)
		pci_info(dev, "    Bridge ranges[%p] simba[%d]\n",
			 ranges, simba);

	/* parse ranges property, or cook one up by hand for Simba */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	if (simba) {
		apb_fake_ranges(dev, bus, pbm);
		goto after_ranges;
	} else if (ranges == NULL) {
		pci_read_bridge_bases(bus);
		goto after_ranges;
	}
	/* Slot 0 is reserved for the I/O window; memory windows start at 1.
	 * Each ranges entry is 8 cells (32 bytes):
	 * child.hi child.mid child.lo parent.hi parent.lo size.hi size.lo.
	 */
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		u64 start;

		if (ofpci_verbose)
			pci_info(dev, "    RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
				 "%08x:%08x]\n",
				 ranges[0], ranges[1], ranges[2], ranges[3],
				 ranges[4], ranges[5], ranges[6], ranges[7]);

		flags = pci_parse_of_flags(ranges[0]);
		size = GET_64BIT(ranges, 6);
		if (flags == 0 || size == 0)
			continue;

		/* On PCI-Express systems, PCI bridges that have no devices downstream
		 * have a bogus size value where the first 32-bit cell is 0xffffffff.
		 * This results in a bogus range where start + size overflows.
		 *
		 * Just skip these otherwise the kernel will complain when the resource
		 * tries to be claimed.
		 */
		if (size >> 32 == 0xffffffff)
			continue;

		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				pci_err(dev, "ignoring extra I/O range"
					" for bridge %pOF\n", node);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				pci_err(dev, "too many memory ranges"
					" for bridge %pOF\n", node);
				continue;
			}
			res = bus->resource[i];
			++i;
		}

		res->flags = flags;
		region.start = start = GET_64BIT(ranges, 1);
		region.end = region.start + size - 1;

		if (ofpci_verbose)
			pci_info(dev, "      Using flags[%08x] start[%016llx] size[%016llx]\n",
				 flags, start, size);

		pcibios_bus_to_resource(dev->bus, res, &region);
	}
after_ranges:
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	if (ofpci_verbose)
		pci_info(dev, "    bus name: %s\n", bus->name);

	pci_of_scan_bus(pbm, node, bus);
}
541*4882a593Smuzhiyun 
/* Walk the children of OF node @node, creating a pci_dev for each one
 * that has a valid 'reg' property, and recurse into any that turn out
 * to be bridges.  Mutually recursive with of_scan_pci_bridge().
 */
static void pci_of_scan_bus(struct pci_pbm_info *pbm,
			    struct device_node *node,
			    struct pci_bus *bus)
{
	struct device_node *child;
	const u32 *reg;
	int reglen, devfn, prev_devfn;
	struct pci_dev *dev;

	if (ofpci_verbose)
		pci_info(bus, "scan_bus[%pOF] bus no %d\n",
			 node, bus->number);

	child = NULL;
	prev_devfn = -1;
	while ((child = of_get_next_child(node, child)) != NULL) {
		if (ofpci_verbose)
			pci_info(bus, "  * %pOF\n", child);
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;

		/* devfn lives in bits 15:8 of the first 'reg' cell. */
		devfn = (reg[0] >> 8) & 0xff;

		/* This is a workaround for some device trees
		 * which list PCI devices twice.  On the V100
		 * for example, device number 3 is listed twice.
		 * Once as "pm" and once again as "lomp".
		 */
		if (devfn == prev_devfn)
			continue;
		prev_devfn = devfn;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(pbm, child, bus, devfn);
		if (!dev)
			continue;
		if (ofpci_verbose)
			pci_info(dev, "dev header type: %x\n", dev->hdr_type);

		if (pci_is_bridge(dev))
			of_scan_pci_bridge(pbm, child, dev);
	}
}
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun static ssize_t
show_pciobppath_attr(struct device * dev,struct device_attribute * attr,char * buf)588*4882a593Smuzhiyun show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
589*4882a593Smuzhiyun {
590*4882a593Smuzhiyun 	struct pci_dev *pdev;
591*4882a593Smuzhiyun 	struct device_node *dp;
592*4882a593Smuzhiyun 
593*4882a593Smuzhiyun 	pdev = to_pci_dev(dev);
594*4882a593Smuzhiyun 	dp = pdev->dev.of_node;
595*4882a593Smuzhiyun 
596*4882a593Smuzhiyun 	return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
597*4882a593Smuzhiyun }
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
600*4882a593Smuzhiyun 
pci_bus_register_of_sysfs(struct pci_bus * bus)601*4882a593Smuzhiyun static void pci_bus_register_of_sysfs(struct pci_bus *bus)
602*4882a593Smuzhiyun {
603*4882a593Smuzhiyun 	struct pci_dev *dev;
604*4882a593Smuzhiyun 	struct pci_bus *child_bus;
605*4882a593Smuzhiyun 	int err;
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	list_for_each_entry(dev, &bus->devices, bus_list) {
608*4882a593Smuzhiyun 		/* we don't really care if we can create this file or
609*4882a593Smuzhiyun 		 * not, but we need to assign the result of the call
610*4882a593Smuzhiyun 		 * or the world will fall under alien invasion and
611*4882a593Smuzhiyun 		 * everybody will be frozen on a spaceship ready to be
612*4882a593Smuzhiyun 		 * eaten on alpha centauri by some green and jelly
613*4882a593Smuzhiyun 		 * humanoid.
614*4882a593Smuzhiyun 		 */
615*4882a593Smuzhiyun 		err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
616*4882a593Smuzhiyun 		(void) err;
617*4882a593Smuzhiyun 	}
618*4882a593Smuzhiyun 	list_for_each_entry(child_bus, &bus->children, node)
619*4882a593Smuzhiyun 		pci_bus_register_of_sysfs(child_bus);
620*4882a593Smuzhiyun }
621*4882a593Smuzhiyun 
/* For VGA-class devices, claim the legacy framebuffer window
 * (bus addresses 0xa0000-0xbffff) so nothing else is placed over it.
 * On success the resource is intentionally kept allocated forever;
 * it is only freed on the error paths.
 */
static void pci_claim_legacy_resources(struct pci_dev *dev)
{
	struct pci_bus_region region;
	struct resource *p, *root, *conflict;

	if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
		return;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return;

	p->name = "Video RAM area";
	p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	/* 128KB starting at the legacy VGA aperture, translated from
	 * bus to resource space for this device's bus.
	 */
	region.start = 0xa0000UL;
	region.end = region.start + 0x1ffffUL;
	pcibios_bus_to_resource(dev->bus, p, &region);

	root = pci_find_parent_resource(dev, p);
	if (!root) {
		pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
		goto err;
	}

	conflict = request_resource_conflict(root, p);
	if (conflict) {
		pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n",
			 p, conflict->name, conflict);
		goto err;
	}

	pci_info(dev, "VGA legacy framebuffer %pR\n", p);
	return;

err:
	kfree(p);
}
660*4882a593Smuzhiyun 
pci_claim_bus_resources(struct pci_bus * bus)661*4882a593Smuzhiyun static void pci_claim_bus_resources(struct pci_bus *bus)
662*4882a593Smuzhiyun {
663*4882a593Smuzhiyun 	struct pci_bus *child_bus;
664*4882a593Smuzhiyun 	struct pci_dev *dev;
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 	list_for_each_entry(dev, &bus->devices, bus_list) {
667*4882a593Smuzhiyun 		int i;
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun 		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
670*4882a593Smuzhiyun 			struct resource *r = &dev->resource[i];
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 			if (r->parent || !r->start || !r->flags)
673*4882a593Smuzhiyun 				continue;
674*4882a593Smuzhiyun 
675*4882a593Smuzhiyun 			if (ofpci_verbose)
676*4882a593Smuzhiyun 				pci_info(dev, "Claiming Resource %d: %pR\n",
677*4882a593Smuzhiyun 					 i, r);
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 			pci_claim_resource(dev, i);
680*4882a593Smuzhiyun 		}
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 		pci_claim_legacy_resources(dev);
683*4882a593Smuzhiyun 	}
684*4882a593Smuzhiyun 
685*4882a593Smuzhiyun 	list_for_each_entry(child_bus, &bus->children, node)
686*4882a593Smuzhiyun 		pci_claim_bus_resources(child_bus);
687*4882a593Smuzhiyun }
688*4882a593Smuzhiyun 
pci_scan_one_pbm(struct pci_pbm_info * pbm,struct device * parent)689*4882a593Smuzhiyun struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
690*4882a593Smuzhiyun 				 struct device *parent)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	LIST_HEAD(resources);
693*4882a593Smuzhiyun 	struct device_node *node = pbm->op->dev.of_node;
694*4882a593Smuzhiyun 	struct pci_bus *bus;
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	printk("PCI: Scanning PBM %pOF\n", node);
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 	pci_add_resource_offset(&resources, &pbm->io_space,
699*4882a593Smuzhiyun 				pbm->io_offset);
700*4882a593Smuzhiyun 	pci_add_resource_offset(&resources, &pbm->mem_space,
701*4882a593Smuzhiyun 				pbm->mem_offset);
702*4882a593Smuzhiyun 	if (pbm->mem64_space.flags)
703*4882a593Smuzhiyun 		pci_add_resource_offset(&resources, &pbm->mem64_space,
704*4882a593Smuzhiyun 					pbm->mem64_offset);
705*4882a593Smuzhiyun 	pbm->busn.start = pbm->pci_first_busno;
706*4882a593Smuzhiyun 	pbm->busn.end	= pbm->pci_last_busno;
707*4882a593Smuzhiyun 	pbm->busn.flags	= IORESOURCE_BUS;
708*4882a593Smuzhiyun 	pci_add_resource(&resources, &pbm->busn);
709*4882a593Smuzhiyun 	bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
710*4882a593Smuzhiyun 				  pbm, &resources);
711*4882a593Smuzhiyun 	if (!bus) {
712*4882a593Smuzhiyun 		printk(KERN_ERR "Failed to create bus for %pOF\n", node);
713*4882a593Smuzhiyun 		pci_free_resource_list(&resources);
714*4882a593Smuzhiyun 		return NULL;
715*4882a593Smuzhiyun 	}
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	pci_of_scan_bus(pbm, node, bus);
718*4882a593Smuzhiyun 	pci_bus_register_of_sysfs(bus);
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun 	pci_claim_bus_resources(bus);
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	pci_bus_add_devices(bus);
723*4882a593Smuzhiyun 	return bus;
724*4882a593Smuzhiyun }
725*4882a593Smuzhiyun 
pcibios_enable_device(struct pci_dev * dev,int mask)726*4882a593Smuzhiyun int pcibios_enable_device(struct pci_dev *dev, int mask)
727*4882a593Smuzhiyun {
728*4882a593Smuzhiyun 	u16 cmd, oldcmd;
729*4882a593Smuzhiyun 	int i;
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
732*4882a593Smuzhiyun 	oldcmd = cmd;
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
735*4882a593Smuzhiyun 		struct resource *res = &dev->resource[i];
736*4882a593Smuzhiyun 
737*4882a593Smuzhiyun 		/* Only set up the requested stuff */
738*4882a593Smuzhiyun 		if (!(mask & (1<<i)))
739*4882a593Smuzhiyun 			continue;
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun 		if (res->flags & IORESOURCE_IO)
742*4882a593Smuzhiyun 			cmd |= PCI_COMMAND_IO;
743*4882a593Smuzhiyun 		if (res->flags & IORESOURCE_MEM)
744*4882a593Smuzhiyun 			cmd |= PCI_COMMAND_MEMORY;
745*4882a593Smuzhiyun 	}
746*4882a593Smuzhiyun 
747*4882a593Smuzhiyun 	if (cmd != oldcmd) {
748*4882a593Smuzhiyun 		pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
749*4882a593Smuzhiyun 		pci_write_config_word(dev, PCI_COMMAND, cmd);
750*4882a593Smuzhiyun 	}
751*4882a593Smuzhiyun 	return 0;
752*4882a593Smuzhiyun }
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun /* Platform support for /proc/bus/pci/X/Y mmap()s. */
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun /* If the user uses a host-bridge as the PCI device, he may use
757*4882a593Smuzhiyun  * this to perform a raw mmap() of the I/O or MEM space behind
758*4882a593Smuzhiyun  * that controller.
759*4882a593Smuzhiyun  *
760*4882a593Smuzhiyun  * This can be useful for execution of x86 PCI bios initialization code
761*4882a593Smuzhiyun  * on a PCI card, like the xfree86 int10 stuff does.
762*4882a593Smuzhiyun  */
__pci_mmap_make_offset_bus(struct pci_dev * pdev,struct vm_area_struct * vma,enum pci_mmap_state mmap_state)763*4882a593Smuzhiyun static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
764*4882a593Smuzhiyun 				      enum pci_mmap_state mmap_state)
765*4882a593Smuzhiyun {
766*4882a593Smuzhiyun 	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
767*4882a593Smuzhiyun 	unsigned long space_size, user_offset, user_size;
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun 	if (mmap_state == pci_mmap_io) {
770*4882a593Smuzhiyun 		space_size = resource_size(&pbm->io_space);
771*4882a593Smuzhiyun 	} else {
772*4882a593Smuzhiyun 		space_size = resource_size(&pbm->mem_space);
773*4882a593Smuzhiyun 	}
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun 	/* Make sure the request is in range. */
776*4882a593Smuzhiyun 	user_offset = vma->vm_pgoff << PAGE_SHIFT;
777*4882a593Smuzhiyun 	user_size = vma->vm_end - vma->vm_start;
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 	if (user_offset >= space_size ||
780*4882a593Smuzhiyun 	    (user_offset + user_size) > space_size)
781*4882a593Smuzhiyun 		return -EINVAL;
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	if (mmap_state == pci_mmap_io) {
784*4882a593Smuzhiyun 		vma->vm_pgoff = (pbm->io_space.start +
785*4882a593Smuzhiyun 				 user_offset) >> PAGE_SHIFT;
786*4882a593Smuzhiyun 	} else {
787*4882a593Smuzhiyun 		vma->vm_pgoff = (pbm->mem_space.start +
788*4882a593Smuzhiyun 				 user_offset) >> PAGE_SHIFT;
789*4882a593Smuzhiyun 	}
790*4882a593Smuzhiyun 
791*4882a593Smuzhiyun 	return 0;
792*4882a593Smuzhiyun }
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun /* Adjust vm_pgoff of VMA such that it is the physical page offset
795*4882a593Smuzhiyun  * corresponding to the 32-bit pci bus offset for DEV requested by the user.
796*4882a593Smuzhiyun  *
797*4882a593Smuzhiyun  * Basically, the user finds the base address for his device which he wishes
798*4882a593Smuzhiyun  * to mmap.  They read the 32-bit value from the config space base register,
799*4882a593Smuzhiyun  * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
800*4882a593Smuzhiyun  * offset parameter of mmap on /proc/bus/pci/XXX for that device.
801*4882a593Smuzhiyun  *
802*4882a593Smuzhiyun  * Returns negative error code on failure, zero on success.
803*4882a593Smuzhiyun  */
static int __pci_mmap_make_offset(struct pci_dev *pdev,
				  struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	unsigned long user_paddr, user_size;
	int i, err;

	/* First compute the physical address in vma->vm_pgoff,
	 * making sure the user offset is within range in the
	 * appropriate PCI space.
	 */
	err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
	if (err)
		return err;

	/* If this is a mapping on a host bridge, any address
	 * is OK.
	 */
	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
		return err;

	/* Otherwise make sure it's in the range for one of the
	 * device's resources.  vm_pgoff now holds the physical page
	 * offset computed above.
	 */
	user_paddr = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &pdev->resource[i];
		resource_size_t aligned_end;

		/* Active? */
		if (!rp->flags)
			continue;

		/* Same type?  The ROM BAR only matches MEM requests;
		 * the other BARs must match the requested space kind.
		 */
		if (i == PCI_ROM_RESOURCE) {
			if (mmap_state != pci_mmap_mem)
				continue;
		} else {
			if ((mmap_state == pci_mmap_io &&
			     (rp->flags & IORESOURCE_IO) == 0) ||
			    (mmap_state == pci_mmap_mem &&
			     (rp->flags & IORESOURCE_MEM) == 0))
				continue;
		}

		/* Align the resource end to the next page address.
		 * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
		 * because actually we need the address of the next byte
		 * after rp->end.
		 */
		aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;

		/* Request fully inside this resource (page-rounded)? */
		if ((rp->start <= user_paddr) &&
		    (user_paddr + user_size) <= aligned_end)
			break;
	}

	/* Fell off the end of the loop: no resource matched. */
	if (i > PCI_ROM_RESOURCE)
		return -EINVAL;

	return 0;
}
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun /* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
870*4882a593Smuzhiyun  * device mapping.
871*4882a593Smuzhiyun  */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
					     enum pci_mmap_state mmap_state)
{
	/* Intentionally empty: on this architecture io_remap_pfn_range()
	 * takes care of the page protection bits, so there is nothing
	 * to adjust here.
	 */
}
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun /* Perform the actual remap of the pages for a PCI device mapping, as appropriate
879*4882a593Smuzhiyun  * for this architecture.  The region in the process to map is described by vm_start
880*4882a593Smuzhiyun  * and vm_end members of VMA, the base physical address is found in vm_pgoff.
881*4882a593Smuzhiyun  * The pci device structure is provided so that architectures may make mapping
882*4882a593Smuzhiyun  * decisions on a per-device or per-bus basis.
883*4882a593Smuzhiyun  *
884*4882a593Smuzhiyun  * Returns a negative error code on failure, zero on success.
885*4882a593Smuzhiyun  */
pci_mmap_page_range(struct pci_dev * dev,int bar,struct vm_area_struct * vma,enum pci_mmap_state mmap_state,int write_combine)886*4882a593Smuzhiyun int pci_mmap_page_range(struct pci_dev *dev, int bar,
887*4882a593Smuzhiyun 			struct vm_area_struct *vma,
888*4882a593Smuzhiyun 			enum pci_mmap_state mmap_state, int write_combine)
889*4882a593Smuzhiyun {
890*4882a593Smuzhiyun 	int ret;
891*4882a593Smuzhiyun 
892*4882a593Smuzhiyun 	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
893*4882a593Smuzhiyun 	if (ret < 0)
894*4882a593Smuzhiyun 		return ret;
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	__pci_mmap_set_pgprot(dev, vma, mmap_state);
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
899*4882a593Smuzhiyun 	ret = io_remap_pfn_range(vma, vma->vm_start,
900*4882a593Smuzhiyun 				 vma->vm_pgoff,
901*4882a593Smuzhiyun 				 vma->vm_end - vma->vm_start,
902*4882a593Smuzhiyun 				 vma->vm_page_prot);
903*4882a593Smuzhiyun 	if (ret)
904*4882a593Smuzhiyun 		return ret;
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun 	return 0;
907*4882a593Smuzhiyun }
908*4882a593Smuzhiyun 
909*4882a593Smuzhiyun #ifdef CONFIG_NUMA
pcibus_to_node(struct pci_bus * pbus)910*4882a593Smuzhiyun int pcibus_to_node(struct pci_bus *pbus)
911*4882a593Smuzhiyun {
912*4882a593Smuzhiyun 	struct pci_pbm_info *pbm = pbus->sysdata;
913*4882a593Smuzhiyun 
914*4882a593Smuzhiyun 	return pbm->numa_node;
915*4882a593Smuzhiyun }
916*4882a593Smuzhiyun EXPORT_SYMBOL(pcibus_to_node);
917*4882a593Smuzhiyun #endif
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun /* Return the domain number for this pci bus */
920*4882a593Smuzhiyun 
pci_domain_nr(struct pci_bus * pbus)921*4882a593Smuzhiyun int pci_domain_nr(struct pci_bus *pbus)
922*4882a593Smuzhiyun {
923*4882a593Smuzhiyun 	struct pci_pbm_info *pbm = pbus->sysdata;
924*4882a593Smuzhiyun 	int ret;
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	if (!pbm) {
927*4882a593Smuzhiyun 		ret = -ENXIO;
928*4882a593Smuzhiyun 	} else {
929*4882a593Smuzhiyun 		ret = pbm->index;
930*4882a593Smuzhiyun 	}
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	return ret;
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun EXPORT_SYMBOL(pci_domain_nr);
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun #ifdef CONFIG_PCI_MSI
arch_setup_msi_irq(struct pci_dev * pdev,struct msi_desc * desc)937*4882a593Smuzhiyun int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
938*4882a593Smuzhiyun {
939*4882a593Smuzhiyun 	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
940*4882a593Smuzhiyun 	unsigned int irq;
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 	if (!pbm->setup_msi_irq)
943*4882a593Smuzhiyun 		return -EINVAL;
944*4882a593Smuzhiyun 
945*4882a593Smuzhiyun 	return pbm->setup_msi_irq(&irq, pdev, desc);
946*4882a593Smuzhiyun }
947*4882a593Smuzhiyun 
arch_teardown_msi_irq(unsigned int irq)948*4882a593Smuzhiyun void arch_teardown_msi_irq(unsigned int irq)
949*4882a593Smuzhiyun {
950*4882a593Smuzhiyun 	struct msi_desc *entry = irq_get_msi_desc(irq);
951*4882a593Smuzhiyun 	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
952*4882a593Smuzhiyun 	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	if (pbm->teardown_msi_irq)
955*4882a593Smuzhiyun 		pbm->teardown_msi_irq(irq, pdev);
956*4882a593Smuzhiyun }
957*4882a593Smuzhiyun #endif /* !(CONFIG_PCI_MSI) */
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun /* ALI sound chips generate 31-bits of DMA, a special register
960*4882a593Smuzhiyun  * determines what bit 31 is emitted as.
961*4882a593Smuzhiyun  */
/* Program the M1533 ISA bridge's bit-31 emission register (0x7e) for
 * an ALI M5451 sound chip requesting a 31-bit DMA mask.
 *
 * Returns 1 when the quirk applied (mask handled), 0 otherwise.
 */
int ali_sound_dma_hack(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	struct pci_dev *ali_isa_bridge;
	u8 val;

	if (!dev_is_pci(dev))
		return 0;

	/* Only the ALI M5451 sound chip asking for exactly a 31-bit
	 * DMA mask needs this workaround.
	 */
	if (to_pci_dev(dev)->vendor != PCI_VENDOR_ID_AL ||
	    to_pci_dev(dev)->device != PCI_DEVICE_ID_AL_M5451 ||
	    device_mask != 0x7fffffff)
		return 0;

	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					 PCI_DEVICE_ID_AL_M1533,
					 NULL);
	/* Fix: the lookup can fail; the old code would then pass a NULL
	 * pdev into pci_read_config_byte() and crash.  Treat a missing
	 * M1533 southbridge as "quirk not applicable".
	 */
	if (!ali_isa_bridge)
		return 0;

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (iommu->dma_addr_mask & 0x80000000)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
	return 1;
}
989*4882a593Smuzhiyun 
void pci_resource_to_user(const struct pci_dev *pdev, int bar,
			  const struct resource *rp, resource_size_t *start,
			  resource_size_t *end)
{
	struct pci_bus_region bus_region;

	/* "User" addresses are the ones shown in
	 * /sys/devices/pci.../.../resource and /proc/bus/pci/devices,
	 * and used as mmap offsets for /proc/bus/pci/BB/DD.F files
	 * (see proc_bus_pci_mmap()).
	 *
	 * On sparc these are PCI bus addresses, i.e. raw BAR values,
	 * so convert the CPU-side resource back to bus coordinates.
	 */
	pcibios_resource_to_bus(pdev->bus, &bus_region, (struct resource *) rp);
	*start = bus_region.start;
	*end = bus_region.end;
}
1007*4882a593Smuzhiyun 
void pcibios_set_master(struct pci_dev *dev)
{
	/* Intentionally empty: sparc needs no special bus-mastering
	 * setup beyond what the PCI core already does.
	 */
}
1012*4882a593Smuzhiyun 
1013*4882a593Smuzhiyun #ifdef CONFIG_PCI_IOV
pcibios_add_device(struct pci_dev * dev)1014*4882a593Smuzhiyun int pcibios_add_device(struct pci_dev *dev)
1015*4882a593Smuzhiyun {
1016*4882a593Smuzhiyun 	struct pci_dev *pdev;
1017*4882a593Smuzhiyun 
1018*4882a593Smuzhiyun 	/* Add sriov arch specific initialization here.
1019*4882a593Smuzhiyun 	 * Copy dev_archdata from PF to VF
1020*4882a593Smuzhiyun 	 */
1021*4882a593Smuzhiyun 	if (dev->is_virtfn) {
1022*4882a593Smuzhiyun 		struct dev_archdata *psd;
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun 		pdev = dev->physfn;
1025*4882a593Smuzhiyun 		psd = &pdev->dev.archdata;
1026*4882a593Smuzhiyun 		pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
1027*4882a593Smuzhiyun 				      psd->stc, psd->host_controller, NULL,
1028*4882a593Smuzhiyun 				      psd->numa_node);
1029*4882a593Smuzhiyun 	}
1030*4882a593Smuzhiyun 	return 0;
1031*4882a593Smuzhiyun }
1032*4882a593Smuzhiyun #endif /* CONFIG_PCI_IOV */
1033*4882a593Smuzhiyun 
pcibios_init(void)1034*4882a593Smuzhiyun static int __init pcibios_init(void)
1035*4882a593Smuzhiyun {
1036*4882a593Smuzhiyun 	pci_dfl_cache_line_size = 64 >> 2;
1037*4882a593Smuzhiyun 	return 0;
1038*4882a593Smuzhiyun }
1039*4882a593Smuzhiyun subsys_initcall(pcibios_init);
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun #define SLOT_NAME_SIZE  11  /* Max decimal digits + null in u32 */
1044*4882a593Smuzhiyun 
pcie_bus_slot_names(struct pci_bus * pbus)1045*4882a593Smuzhiyun static void pcie_bus_slot_names(struct pci_bus *pbus)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun 	struct pci_dev *pdev;
1048*4882a593Smuzhiyun 	struct pci_bus *bus;
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun 	list_for_each_entry(pdev, &pbus->devices, bus_list) {
1051*4882a593Smuzhiyun 		char name[SLOT_NAME_SIZE];
1052*4882a593Smuzhiyun 		struct pci_slot *pci_slot;
1053*4882a593Smuzhiyun 		const u32 *slot_num;
1054*4882a593Smuzhiyun 		int len;
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 		slot_num = of_get_property(pdev->dev.of_node,
1057*4882a593Smuzhiyun 					   "physical-slot#", &len);
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 		if (slot_num == NULL || len != 4)
1060*4882a593Smuzhiyun 			continue;
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun 		snprintf(name, sizeof(name), "%u", slot_num[0]);
1063*4882a593Smuzhiyun 		pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 		if (IS_ERR(pci_slot))
1066*4882a593Smuzhiyun 			pr_err("PCI: pci_create_slot returned %ld.\n",
1067*4882a593Smuzhiyun 			       PTR_ERR(pci_slot));
1068*4882a593Smuzhiyun 	}
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun 	list_for_each_entry(bus, &pbus->children, node)
1071*4882a593Smuzhiyun 		pcie_bus_slot_names(bus);
1072*4882a593Smuzhiyun }
1073*4882a593Smuzhiyun 
pci_bus_slot_names(struct device_node * node,struct pci_bus * bus)1074*4882a593Smuzhiyun static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
1075*4882a593Smuzhiyun {
1076*4882a593Smuzhiyun 	const struct pci_slot_names {
1077*4882a593Smuzhiyun 		u32	slot_mask;
1078*4882a593Smuzhiyun 		char	names[0];
1079*4882a593Smuzhiyun 	} *prop;
1080*4882a593Smuzhiyun 	const char *sp;
1081*4882a593Smuzhiyun 	int len, i;
1082*4882a593Smuzhiyun 	u32 mask;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	prop = of_get_property(node, "slot-names", &len);
1085*4882a593Smuzhiyun 	if (!prop)
1086*4882a593Smuzhiyun 		return;
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	mask = prop->slot_mask;
1089*4882a593Smuzhiyun 	sp = prop->names;
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	if (ofpci_verbose)
1092*4882a593Smuzhiyun 		pci_info(bus, "Making slots for [%pOF] mask[0x%02x]\n",
1093*4882a593Smuzhiyun 			 node, mask);
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 	i = 0;
1096*4882a593Smuzhiyun 	while (mask) {
1097*4882a593Smuzhiyun 		struct pci_slot *pci_slot;
1098*4882a593Smuzhiyun 		u32 this_bit = 1 << i;
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 		if (!(mask & this_bit)) {
1101*4882a593Smuzhiyun 			i++;
1102*4882a593Smuzhiyun 			continue;
1103*4882a593Smuzhiyun 		}
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 		if (ofpci_verbose)
1106*4882a593Smuzhiyun 			pci_info(bus, "Making slot [%s]\n", sp);
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 		pci_slot = pci_create_slot(bus, i, sp, NULL);
1109*4882a593Smuzhiyun 		if (IS_ERR(pci_slot))
1110*4882a593Smuzhiyun 			pci_err(bus, "pci_create_slot returned %ld\n",
1111*4882a593Smuzhiyun 				PTR_ERR(pci_slot));
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 		sp += strlen(sp) + 1;
1114*4882a593Smuzhiyun 		mask &= ~this_bit;
1115*4882a593Smuzhiyun 		i++;
1116*4882a593Smuzhiyun 	}
1117*4882a593Smuzhiyun }
1118*4882a593Smuzhiyun 
of_pci_slot_init(void)1119*4882a593Smuzhiyun static int __init of_pci_slot_init(void)
1120*4882a593Smuzhiyun {
1121*4882a593Smuzhiyun 	struct pci_bus *pbus = NULL;
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	while ((pbus = pci_find_next_bus(pbus)) != NULL) {
1124*4882a593Smuzhiyun 		struct device_node *node;
1125*4882a593Smuzhiyun 		struct pci_dev *pdev;
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 		pdev = list_first_entry(&pbus->devices, struct pci_dev,
1128*4882a593Smuzhiyun 					bus_list);
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 		if (pdev && pci_is_pcie(pdev)) {
1131*4882a593Smuzhiyun 			pcie_bus_slot_names(pbus);
1132*4882a593Smuzhiyun 		} else {
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 			if (pbus->self) {
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 				/* PCI->PCI bridge */
1137*4882a593Smuzhiyun 				node = pbus->self->dev.of_node;
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 			} else {
1140*4882a593Smuzhiyun 				struct pci_pbm_info *pbm = pbus->sysdata;
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 				/* Host PCI controller */
1143*4882a593Smuzhiyun 				node = pbm->op->dev.of_node;
1144*4882a593Smuzhiyun 			}
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 			pci_bus_slot_names(node, pbus);
1147*4882a593Smuzhiyun 		}
1148*4882a593Smuzhiyun 	}
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	return 0;
1151*4882a593Smuzhiyun }
1152*4882a593Smuzhiyun device_initcall(of_pci_slot_init);
1153*4882a593Smuzhiyun #endif
1154