// SPDX-License-Identifier: GPL-2.0
/*
 * From setup-res.c, by:
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *	Ivan Kokshaysky (ink@jurassic.park.msu.ru)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>

#include "pci.h"

void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset)
{
	struct resource_entry *entry;

	entry = resource_list_create_entry(res, 0);
	if (!entry) {
		pr_err("PCI: can't add host bridge window %pR\n", res);
		return;
	}

	entry->offset = offset;
	resource_list_add_tail(entry, resources);
}
EXPORT_SYMBOL(pci_add_resource_offset);

void pci_add_resource(struct list_head *resources, struct resource *res)
{
	pci_add_resource_offset(resources, res, 0);
}
EXPORT_SYMBOL(pci_add_resource);

void pci_free_resource_list(struct list_head *resources)
{
	resource_list_free(resources);
}
EXPORT_SYMBOL(pci_free_resource_list);
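
/*
 * Illustrative sketch, not part of the original file: a host bridge driver
 * typically builds its window list with the helpers above before scanning
 * the root bus, then frees the list when it no longer needs it.
 * "mem_window" and "cpu_bus_offset" are assumed names used only here:
 *
 *	LIST_HEAD(resources);
 *
 *	pci_add_resource(&resources, &ioport_resource);
 *	pci_add_resource_offset(&resources, &mem_window, cpu_bus_offset);
 *	...
 *	pci_free_resource_list(&resources);
 */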

void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
			  unsigned int flags)
{
	struct pci_bus_resource *bus_res;

	bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
	if (!bus_res) {
		dev_err(&bus->dev, "can't add %pR resource\n", res);
		return;
	}

	bus_res->res = res;
	bus_res->flags = flags;
	list_add_tail(&bus_res->list, &bus->resources);
}
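
/*
 * Illustrative sketch, not part of the original file: architecture or host
 * bridge code that discovers an additional aperture after the root bus has
 * been created can attach it with pci_bus_add_resource().  "root_bus" and
 * "extra_mem_window" are assumed names:
 *
 *	pci_bus_add_resource(root_bus, &extra_mem_window, 0);
 */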

struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
{
	struct pci_bus_resource *bus_res;

	if (n < PCI_BRIDGE_RESOURCE_NUM)
		return bus->resource[n];

	n -= PCI_BRIDGE_RESOURCE_NUM;
	list_for_each_entry(bus_res, &bus->resources, list) {
		if (n-- == 0)
			return bus_res->res;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_bus_resource_n);
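
/*
 * Illustrative sketch, not part of the original file: callers normally walk
 * both the fixed bridge windows and the extra list entries through the
 * pci_bus_for_each_resource() iterator, which is built on
 * pci_bus_resource_n().  "bus" is an assumed variable:
 *
 *	struct resource *r;
 *	int i;
 *
 *	pci_bus_for_each_resource(bus, r, i) {
 *		if (!r)
 *			continue;
 *		dev_info(&bus->dev, "window %d: %pR\n", i, r);
 *	}
 */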

void pci_bus_remove_resources(struct pci_bus *bus)
{
	int i;
	struct pci_bus_resource *bus_res, *tmp;

	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		bus->resource[i] = NULL;

	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
		list_del(&bus_res->list);
		kfree(bus_res);
	}
}

int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources)
{
	struct resource_entry *win;
	struct resource *parent, *res;
	int err;

	resource_list_for_each_entry(win, resources) {
		res = win->res;
		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);
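
/*
 * Illustrative sketch, not part of the original file: a host controller
 * driver's probe routine typically requests its windows against
 * ioport_resource/iomem_resource before scanning, so conflicts show up
 * early.  "pdev" and "bridge" are assumed names:
 *
 *	err = devm_request_pci_bus_resources(&pdev->dev, &bridge->windows);
 *	if (err)
 *		return err;
 */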

static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
				(pci_bus_addr_t) 0xffffffffffffffffULL};
#endif

/*
 * @res contains CPU addresses.  Clip it so the corresponding bus addresses
 * on @bus are entirely within @region.  This is used to control the bus
 * addresses of resources we allocate, e.g., we may need a resource that
 * can be mapped by a 32-bit BAR.
 */
static void pci_clip_resource_to_region(struct pci_bus *bus,
					struct resource *res,
					struct pci_bus_region *region)
{
	struct pci_bus_region r;

	pcibios_resource_to_bus(bus, &r, res);
	if (r.start < region->start)
		r.start = region->start;
	if (r.end > region->end)
		r.end = region->end;

	if (r.end < r.start)
		res->end = res->start - 1;
	else
		pcibios_bus_to_resource(bus, res, &r);
}

static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data,
		struct pci_bus_region *region)
{
	int i, ret;
	struct resource *r, avail;
	resource_size_t max;

	type_mask |= IORESOURCE_TYPE_BITS;

	pci_bus_for_each_resource(bus, r, i) {
		resource_size_t min_used = min;

		if (!r)
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/*
		 * We cannot allocate a non-prefetching resource
		 * from a pre-fetching area.
		 */
		if ((r->flags & IORESOURCE_PREFETCH) &&
		    !(res->flags & IORESOURCE_PREFETCH))
			continue;

		avail = *r;
		pci_clip_resource_to_region(bus, &avail, region);

		/*
		 * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
		 * protect badly documented motherboard resources, but if
		 * this is an already-configured bridge window, its start
		 * overrides "min".
		 */
		if (avail.start)
			min_used = avail.start;

		max = avail.end;

		/* Ok, try it out.. */
		ret = allocate_resource(r, res, size, min_used, max,
					align, alignf, alignf_data);
		if (ret == 0)
			return 0;
	}
	return -ENOMEM;
}

/**
 * pci_bus_alloc_resource - allocate a resource from a parent bus
 * @bus: PCI bus
 * @res: resource to allocate
 * @size: size of resource to allocate
 * @align: alignment of resource to allocate
 * @min: minimum /proc/iomem address to allocate
 * @type_mask: IORESOURCE_* type flags
 * @alignf: resource alignment function
 * @alignf_data: data argument for resource alignment function
 *
 * Given the PCI bus a device resides on, the size, minimum address,
 * alignment and type, try to find an acceptable resource allocation
 * for a specific device resource.
 */
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
		resource_size_t size, resource_size_t align,
		resource_size_t min, unsigned long type_mask,
		resource_size_t (*alignf)(void *,
					  const struct resource *,
					  resource_size_t,
					  resource_size_t),
		void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	int rc;

	if (res->flags & IORESOURCE_MEM_64) {
		rc = pci_bus_alloc_from_region(bus, res, size, align, min,
					       type_mask, alignf, alignf_data,
					       &pci_high);
		if (rc == 0)
			return 0;

		return pci_bus_alloc_from_region(bus, res, size, align, min,
						 type_mask, alignf, alignf_data,
						 &pci_64_bit);
	}
#endif

	return pci_bus_alloc_from_region(bus, res, size, align, min,
					 type_mask, alignf, alignf_data,
					 &pci_32_bit);
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
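
/*
 * Illustrative sketch, not part of the original file: pci_bus_alloc_resource()
 * is the low-level allocator used when assigning space for a BAR.  A minimal
 * call for a non-prefetchable memory BAR might look like this, with "dev" and
 * "res" assumed to be the device and its (still unset) resource, and the BAR
 * alignment approximated by its size:
 *
 *	ret = pci_bus_alloc_resource(dev->bus, res, resource_size(res),
 *				     resource_size(res), PCIBIOS_MIN_MEM,
 *				     IORESOURCE_PREFETCH,
 *				     pcibios_align_resource, dev);
 *
 * Passing IORESOURCE_PREFETCH in type_mask requires the prefetch attribute of
 * the chosen window to match that of the resource being allocated.
 */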

/*
 * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
 * resource fits inside a window of an upstream bridge, do nothing.  If it
 * overlaps an upstream window but extends outside it, clip the resource so
 * it fits completely inside.
 */
bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
{
	struct pci_bus *bus = dev->bus;
	struct resource *res = &dev->resource[idx];
	struct resource orig_res = *res;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		resource_size_t start, end;

		if (!r)
			continue;

		if (resource_type(res) != resource_type(r))
			continue;

		start = max(r->start, res->start);
		end = min(r->end, res->end);

		if (start > end)
			continue;	/* no overlap */

		if (res->start == start && res->end == end)
			return false;	/* no change */

		res->start = start;
		res->end = end;
		res->flags &= ~IORESOURCE_UNSET;
		orig_res.flags &= ~IORESOURCE_UNSET;
		pci_info(dev, "%pR clipped to %pR\n", &orig_res, res);

		return true;
	}

	return false;
}

void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }

void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }

/**
 * pci_bus_add_device - start driver for a single device
 * @dev: device to add
 *
 * This adds sysfs entries and starts device drivers.
 */
void pci_bus_add_device(struct pci_dev *dev)
{
	int retval;

	/*
	 * Cannot be done in pci_device_add() yet because some devices do
	 * not have their resources assigned at that point.
	 */
	pcibios_bus_add_device(dev);
	pci_fixup_device(pci_fixup_final, dev);
	pci_create_sysfs_dev_files(dev);
	pci_proc_attach_device(dev);
	pci_bridge_d3_update(dev);

	dev->match_driver = true;
	retval = device_attach(&dev->dev);
	if (retval < 0 && retval != -EPROBE_DEFER)
		pci_warn(dev, "device attach failed (%d)\n", retval);

	pci_dev_assign_added(dev, true);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);

/**
 * pci_bus_add_devices - start drivers for PCI devices
 * @bus: bus to check for new devices
 *
 * Start drivers for PCI devices and add some sysfs entries.
 */
void pci_bus_add_devices(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip already-added devices */
		if (pci_dev_is_added(dev))
			continue;
		pci_bus_add_device(dev);
	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Skip if device attach failed */
		if (!pci_dev_is_added(dev))
			continue;
		child = dev->subordinate;
		if (child)
			pci_bus_add_devices(child);
	}
}
EXPORT_SYMBOL(pci_bus_add_devices);
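
/*
 * Illustrative sketch, not part of the original file: the usual rescan
 * pattern pairs a scan of the bus with pci_bus_add_devices() so that newly
 * discovered devices get sysfs entries and drivers.  "bus" is an assumed
 * pointer to an existing bus:
 *
 *	pci_lock_rescan_remove();
 *	pci_scan_child_bus(bus);
 *	pci_assign_unassigned_bus_resources(bus);
 *	pci_bus_add_devices(bus);
 *	pci_unlock_rescan_remove();
 */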

/**
 * pci_walk_bus - walk devices on/under bus, calling callback.
 * @top: bus whose devices should be walked
 * @cb: callback to be called for each device found
 * @userdata: arbitrary pointer to be passed to callback
 *
 * Walk the given bus, including any bridged devices
 * on buses under this bus.  Call the provided callback
 * on each device found.
 *
 * We check the return of @cb each time. If it returns anything
 * other than 0, we break out.
 */
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
		  void *userdata)
{
	struct pci_dev *dev;
	struct pci_bus *bus;
	struct list_head *next;
	int retval;

	bus = top;
	down_read(&pci_bus_sem);
	next = top->devices.next;
	for (;;) {
		if (next == &bus->devices) {
			/* end of this bus, go up or finish */
			if (bus == top)
				break;
			next = bus->self->bus_list.next;
			bus = bus->self->bus;
			continue;
		}
		dev = list_entry(next, struct pci_dev, bus_list);
		if (dev->subordinate) {
			/* this is a pci-pci bridge, do its devices next */
			next = dev->subordinate->devices.next;
			bus = dev->subordinate;
		} else
			next = dev->bus_list.next;

		retval = cb(dev, userdata);
		if (retval)
			break;
	}
	up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
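
/*
 * Illustrative sketch, not part of the original file: a typical pci_walk_bus()
 * callback just inspects or tweaks each device and returns 0 to keep walking.
 * The function and variable names here are assumed for the example:
 *
 *	static int report_device(struct pci_dev *dev, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		pci_info(dev, "seen during bus walk\n");
 *		return 0;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	pci_walk_bus(bus, report_device, &count);
 */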

struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
	if (bus)
		get_device(&bus->dev);
	return bus;
}

void pci_bus_put(struct pci_bus *bus)
{
	if (bus)
		put_device(&bus->dev);
}
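
/*
 * Illustrative sketch, not part of the original file: code that stashes a bus
 * pointer beyond the scope where it found the bus should hold a reference and
 * drop it when done.  "state" is an assumed private structure:
 *
 *	state->bus = pci_bus_get(dev->bus);
 *	...
 *	pci_bus_put(state->bus);
 *	state->bus = NULL;
 */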