1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Low-Level PCI Access for i386 machines
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright 1993, 1994 Drew Eckhardt
6*4882a593Smuzhiyun * Visionary Computing
7*4882a593Smuzhiyun * (Unix and Linux consulting and custom programming)
8*4882a593Smuzhiyun * Drew@Colorado.EDU
9*4882a593Smuzhiyun * +1 (303) 786-7975
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * Drew's work was sponsored by:
12*4882a593Smuzhiyun * iX Multiuser Multitasking Magazine
13*4882a593Smuzhiyun * Hannover, Germany
14*4882a593Smuzhiyun * hm@ix.de
15*4882a593Smuzhiyun *
16*4882a593Smuzhiyun * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
17*4882a593Smuzhiyun *
18*4882a593Smuzhiyun * For more information, please consult the following manuals (look at
19*4882a593Smuzhiyun * http://www.pcisig.com/ for how to get them):
20*4882a593Smuzhiyun *
21*4882a593Smuzhiyun * PCI BIOS Specification
22*4882a593Smuzhiyun * PCI Local Bus Specification
23*4882a593Smuzhiyun * PCI to PCI Bridge Specification
24*4882a593Smuzhiyun * PCI System Design Guide
25*4882a593Smuzhiyun *
26*4882a593Smuzhiyun */
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #include <linux/types.h>
29*4882a593Smuzhiyun #include <linux/kernel.h>
30*4882a593Smuzhiyun #include <linux/export.h>
31*4882a593Smuzhiyun #include <linux/pci.h>
32*4882a593Smuzhiyun #include <linux/init.h>
33*4882a593Smuzhiyun #include <linux/ioport.h>
34*4882a593Smuzhiyun #include <linux/errno.h>
35*4882a593Smuzhiyun #include <linux/memblock.h>
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun #include <asm/memtype.h>
38*4882a593Smuzhiyun #include <asm/e820/api.h>
39*4882a593Smuzhiyun #include <asm/pci_x86.h>
40*4882a593Smuzhiyun #include <asm/io_apic.h>
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun /*
44*4882a593Smuzhiyun * This list of dynamic mappings is for temporarily maintaining
45*4882a593Smuzhiyun * original BIOS BAR addresses for possible reinstatement.
46*4882a593Smuzhiyun */
47*4882a593Smuzhiyun struct pcibios_fwaddrmap {
48*4882a593Smuzhiyun struct list_head list;
49*4882a593Smuzhiyun struct pci_dev *dev;
50*4882a593Smuzhiyun resource_size_t fw_addr[DEVICE_COUNT_RESOURCE];
51*4882a593Smuzhiyun };
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun static LIST_HEAD(pcibios_fwaddrmappings);
54*4882a593Smuzhiyun static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);
55*4882a593Smuzhiyun static bool pcibios_fw_addr_done;
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun /* Must be called with 'pcibios_fwaddrmap_lock' lock held. */
pcibios_fwaddrmap_lookup(struct pci_dev * dev)58*4882a593Smuzhiyun static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun struct pcibios_fwaddrmap *map;
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun lockdep_assert_held(&pcibios_fwaddrmap_lock);
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun list_for_each_entry(map, &pcibios_fwaddrmappings, list)
65*4882a593Smuzhiyun if (map->dev == dev)
66*4882a593Smuzhiyun return map;
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun return NULL;
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun static void
pcibios_save_fw_addr(struct pci_dev * dev,int idx,resource_size_t fw_addr)72*4882a593Smuzhiyun pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
73*4882a593Smuzhiyun {
74*4882a593Smuzhiyun unsigned long flags;
75*4882a593Smuzhiyun struct pcibios_fwaddrmap *map;
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun if (pcibios_fw_addr_done)
78*4882a593Smuzhiyun return;
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
81*4882a593Smuzhiyun map = pcibios_fwaddrmap_lookup(dev);
82*4882a593Smuzhiyun if (!map) {
83*4882a593Smuzhiyun spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
84*4882a593Smuzhiyun map = kzalloc(sizeof(*map), GFP_KERNEL);
85*4882a593Smuzhiyun if (!map)
86*4882a593Smuzhiyun return;
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun map->dev = pci_dev_get(dev);
89*4882a593Smuzhiyun map->fw_addr[idx] = fw_addr;
90*4882a593Smuzhiyun INIT_LIST_HEAD(&map->list);
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
93*4882a593Smuzhiyun list_add_tail(&map->list, &pcibios_fwaddrmappings);
94*4882a593Smuzhiyun } else
95*4882a593Smuzhiyun map->fw_addr[idx] = fw_addr;
96*4882a593Smuzhiyun spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun
pcibios_retrieve_fw_addr(struct pci_dev * dev,int idx)99*4882a593Smuzhiyun resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun unsigned long flags;
102*4882a593Smuzhiyun struct pcibios_fwaddrmap *map;
103*4882a593Smuzhiyun resource_size_t fw_addr = 0;
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun if (pcibios_fw_addr_done)
106*4882a593Smuzhiyun return 0;
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
109*4882a593Smuzhiyun map = pcibios_fwaddrmap_lookup(dev);
110*4882a593Smuzhiyun if (map)
111*4882a593Smuzhiyun fw_addr = map->fw_addr[idx];
112*4882a593Smuzhiyun spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun return fw_addr;
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
pcibios_fw_addr_list_del(void)117*4882a593Smuzhiyun static void __init pcibios_fw_addr_list_del(void)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun unsigned long flags;
120*4882a593Smuzhiyun struct pcibios_fwaddrmap *entry, *next;
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
123*4882a593Smuzhiyun list_for_each_entry_safe(entry, next, &pcibios_fwaddrmappings, list) {
124*4882a593Smuzhiyun list_del(&entry->list);
125*4882a593Smuzhiyun pci_dev_put(entry->dev);
126*4882a593Smuzhiyun kfree(entry);
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
129*4882a593Smuzhiyun pcibios_fw_addr_done = true;
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun static int
skip_isa_ioresource_align(struct pci_dev * dev)133*4882a593Smuzhiyun skip_isa_ioresource_align(struct pci_dev *dev) {
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun if ((pci_probe & PCI_CAN_SKIP_ISA_ALIGN) &&
136*4882a593Smuzhiyun !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
137*4882a593Smuzhiyun return 1;
138*4882a593Smuzhiyun return 0;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun /*
142*4882a593Smuzhiyun * We need to avoid collisions with `mirrored' VGA ports
143*4882a593Smuzhiyun * and other strange ISA hardware, so we always want the
144*4882a593Smuzhiyun * addresses to be allocated in the 0x000-0x0ff region
145*4882a593Smuzhiyun * modulo 0x400.
146*4882a593Smuzhiyun *
147*4882a593Smuzhiyun * Why? Because some silly external IO cards only decode
148*4882a593Smuzhiyun * the low 10 bits of the IO address. The 0x00-0xff region
149*4882a593Smuzhiyun * is reserved for motherboard devices that decode all 16
150*4882a593Smuzhiyun * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
151*4882a593Smuzhiyun * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff.
153*4882a593Smuzhiyun */
154*4882a593Smuzhiyun resource_size_t
pcibios_align_resource(void * data,const struct resource * res,resource_size_t size,resource_size_t align)155*4882a593Smuzhiyun pcibios_align_resource(void *data, const struct resource *res,
156*4882a593Smuzhiyun resource_size_t size, resource_size_t align)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun struct pci_dev *dev = data;
159*4882a593Smuzhiyun resource_size_t start = res->start;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun if (res->flags & IORESOURCE_IO) {
162*4882a593Smuzhiyun if (skip_isa_ioresource_align(dev))
163*4882a593Smuzhiyun return start;
164*4882a593Smuzhiyun if (start & 0x300)
165*4882a593Smuzhiyun start = (start + 0x3ff) & ~0x3ff;
166*4882a593Smuzhiyun } else if (res->flags & IORESOURCE_MEM) {
167*4882a593Smuzhiyun /* The low 1MB range is reserved for ISA cards */
168*4882a593Smuzhiyun if (start < BIOS_END)
169*4882a593Smuzhiyun start = BIOS_END;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun return start;
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun EXPORT_SYMBOL(pcibios_align_resource);
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun /*
176*4882a593Smuzhiyun * Handle resources of PCI devices. If the world were perfect, we could
177*4882a593Smuzhiyun * just allocate all the resource regions and do nothing more. It isn't.
178*4882a593Smuzhiyun * On the other hand, we cannot just re-allocate all devices, as it would
179*4882a593Smuzhiyun * require us to know lots of host bridge internals. So we attempt to
180*4882a593Smuzhiyun * keep as much of the original configuration as possible, but tweak it
181*4882a593Smuzhiyun * when it's found to be wrong.
182*4882a593Smuzhiyun *
183*4882a593Smuzhiyun * Known BIOS problems we have to work around:
184*4882a593Smuzhiyun * - I/O or memory regions not configured
185*4882a593Smuzhiyun * - regions configured, but not enabled in the command register
186*4882a593Smuzhiyun * - bogus I/O addresses above 64K used
187*4882a593Smuzhiyun * - expansion ROMs left enabled (this may sound harmless, but given
188*4882a593Smuzhiyun * the fact the PCI specs explicitly allow address decoders to be
189*4882a593Smuzhiyun * shared between expansion ROMs and other resource regions, it's
190*4882a593Smuzhiyun * at least dangerous)
191*4882a593Smuzhiyun * - bad resource sizes or overlaps with other regions
192*4882a593Smuzhiyun *
193*4882a593Smuzhiyun * Our solution:
194*4882a593Smuzhiyun * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
195*4882a593Smuzhiyun * This gives us fixed barriers on where we can allocate.
196*4882a593Smuzhiyun * (2) Allocate resources for all enabled devices. If there is
197*4882a593Smuzhiyun * a collision, just mark the resource as unallocated. Also
198*4882a593Smuzhiyun * disable expansion ROMs during this step.
199*4882a593Smuzhiyun * (3) Try to allocate resources for disabled devices. If the
200*4882a593Smuzhiyun * resources were assigned correctly, everything goes well,
201*4882a593Smuzhiyun * if they weren't, they won't disturb allocation of other
202*4882a593Smuzhiyun * resources.
203*4882a593Smuzhiyun * (4) Assign new addresses to resources which were either
204*4882a593Smuzhiyun * not configured at all or misconfigured. If explicitly
205*4882a593Smuzhiyun * requested by the user, configure expansion ROM address
206*4882a593Smuzhiyun * as well.
207*4882a593Smuzhiyun */
208*4882a593Smuzhiyun
pcibios_allocate_bridge_resources(struct pci_dev * dev)209*4882a593Smuzhiyun static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
210*4882a593Smuzhiyun {
211*4882a593Smuzhiyun int idx;
212*4882a593Smuzhiyun struct resource *r;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
215*4882a593Smuzhiyun r = &dev->resource[idx];
216*4882a593Smuzhiyun if (!r->flags)
217*4882a593Smuzhiyun continue;
218*4882a593Smuzhiyun if (r->parent) /* Already allocated */
219*4882a593Smuzhiyun continue;
220*4882a593Smuzhiyun if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
221*4882a593Smuzhiyun /*
222*4882a593Smuzhiyun * Something is wrong with the region.
223*4882a593Smuzhiyun * Invalidate the resource to prevent
224*4882a593Smuzhiyun * child resource allocations in this
225*4882a593Smuzhiyun * range.
226*4882a593Smuzhiyun */
227*4882a593Smuzhiyun r->start = r->end = 0;
228*4882a593Smuzhiyun r->flags = 0;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun
pcibios_allocate_bus_resources(struct pci_bus * bus)233*4882a593Smuzhiyun static void pcibios_allocate_bus_resources(struct pci_bus *bus)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun struct pci_bus *child;
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun /* Depth-First Search on bus tree */
238*4882a593Smuzhiyun if (bus->self)
239*4882a593Smuzhiyun pcibios_allocate_bridge_resources(bus->self);
240*4882a593Smuzhiyun list_for_each_entry(child, &bus->children, node)
241*4882a593Smuzhiyun pcibios_allocate_bus_resources(child);
242*4882a593Smuzhiyun }
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun struct pci_check_idx_range {
245*4882a593Smuzhiyun int start;
246*4882a593Smuzhiyun int end;
247*4882a593Smuzhiyun };
248*4882a593Smuzhiyun
pcibios_allocate_dev_resources(struct pci_dev * dev,int pass)249*4882a593Smuzhiyun static void pcibios_allocate_dev_resources(struct pci_dev *dev, int pass)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun int idx, disabled, i;
252*4882a593Smuzhiyun u16 command;
253*4882a593Smuzhiyun struct resource *r;
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun struct pci_check_idx_range idx_range[] = {
256*4882a593Smuzhiyun { PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
257*4882a593Smuzhiyun #ifdef CONFIG_PCI_IOV
258*4882a593Smuzhiyun { PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
259*4882a593Smuzhiyun #endif
260*4882a593Smuzhiyun };
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun pci_read_config_word(dev, PCI_COMMAND, &command);
263*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(idx_range); i++)
264*4882a593Smuzhiyun for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
265*4882a593Smuzhiyun r = &dev->resource[idx];
266*4882a593Smuzhiyun if (r->parent) /* Already allocated */
267*4882a593Smuzhiyun continue;
268*4882a593Smuzhiyun if (!r->start) /* Address not assigned at all */
269*4882a593Smuzhiyun continue;
270*4882a593Smuzhiyun if (r->flags & IORESOURCE_IO)
271*4882a593Smuzhiyun disabled = !(command & PCI_COMMAND_IO);
272*4882a593Smuzhiyun else
273*4882a593Smuzhiyun disabled = !(command & PCI_COMMAND_MEMORY);
274*4882a593Smuzhiyun if (pass == disabled) {
275*4882a593Smuzhiyun dev_dbg(&dev->dev,
276*4882a593Smuzhiyun "BAR %d: reserving %pr (d=%d, p=%d)\n",
277*4882a593Smuzhiyun idx, r, disabled, pass);
278*4882a593Smuzhiyun if (pci_claim_resource(dev, idx) < 0) {
279*4882a593Smuzhiyun if (r->flags & IORESOURCE_PCI_FIXED) {
280*4882a593Smuzhiyun dev_info(&dev->dev, "BAR %d %pR is immovable\n",
281*4882a593Smuzhiyun idx, r);
282*4882a593Smuzhiyun } else {
283*4882a593Smuzhiyun /* We'll assign a new address later */
284*4882a593Smuzhiyun pcibios_save_fw_addr(dev,
285*4882a593Smuzhiyun idx, r->start);
286*4882a593Smuzhiyun r->end -= r->start;
287*4882a593Smuzhiyun r->start = 0;
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun if (!pass) {
293*4882a593Smuzhiyun r = &dev->resource[PCI_ROM_RESOURCE];
294*4882a593Smuzhiyun if (r->flags & IORESOURCE_ROM_ENABLE) {
295*4882a593Smuzhiyun /* Turn the ROM off, leave the resource region,
296*4882a593Smuzhiyun * but keep it unregistered. */
297*4882a593Smuzhiyun u32 reg;
298*4882a593Smuzhiyun dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
299*4882a593Smuzhiyun r->flags &= ~IORESOURCE_ROM_ENABLE;
300*4882a593Smuzhiyun pci_read_config_dword(dev, dev->rom_base_reg, ®);
301*4882a593Smuzhiyun pci_write_config_dword(dev, dev->rom_base_reg,
302*4882a593Smuzhiyun reg & ~PCI_ROM_ADDRESS_ENABLE);
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun }
306*4882a593Smuzhiyun
pcibios_allocate_resources(struct pci_bus * bus,int pass)307*4882a593Smuzhiyun static void pcibios_allocate_resources(struct pci_bus *bus, int pass)
308*4882a593Smuzhiyun {
309*4882a593Smuzhiyun struct pci_dev *dev;
310*4882a593Smuzhiyun struct pci_bus *child;
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun list_for_each_entry(dev, &bus->devices, bus_list) {
313*4882a593Smuzhiyun pcibios_allocate_dev_resources(dev, pass);
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun child = dev->subordinate;
316*4882a593Smuzhiyun if (child)
317*4882a593Smuzhiyun pcibios_allocate_resources(child, pass);
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun
pcibios_allocate_dev_rom_resource(struct pci_dev * dev)321*4882a593Smuzhiyun static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
322*4882a593Smuzhiyun {
323*4882a593Smuzhiyun struct resource *r;
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun /*
326*4882a593Smuzhiyun * Try to use BIOS settings for ROMs, otherwise let
327*4882a593Smuzhiyun * pci_assign_unassigned_resources() allocate the new
328*4882a593Smuzhiyun * addresses.
329*4882a593Smuzhiyun */
330*4882a593Smuzhiyun r = &dev->resource[PCI_ROM_RESOURCE];
331*4882a593Smuzhiyun if (!r->flags || !r->start)
332*4882a593Smuzhiyun return;
333*4882a593Smuzhiyun if (r->parent) /* Already allocated */
334*4882a593Smuzhiyun return;
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
337*4882a593Smuzhiyun r->end -= r->start;
338*4882a593Smuzhiyun r->start = 0;
339*4882a593Smuzhiyun }
340*4882a593Smuzhiyun }
pcibios_allocate_rom_resources(struct pci_bus * bus)341*4882a593Smuzhiyun static void pcibios_allocate_rom_resources(struct pci_bus *bus)
342*4882a593Smuzhiyun {
343*4882a593Smuzhiyun struct pci_dev *dev;
344*4882a593Smuzhiyun struct pci_bus *child;
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun list_for_each_entry(dev, &bus->devices, bus_list) {
347*4882a593Smuzhiyun pcibios_allocate_dev_rom_resource(dev);
348*4882a593Smuzhiyun
349*4882a593Smuzhiyun child = dev->subordinate;
350*4882a593Smuzhiyun if (child)
351*4882a593Smuzhiyun pcibios_allocate_rom_resources(child);
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun
pcibios_assign_resources(void)355*4882a593Smuzhiyun static int __init pcibios_assign_resources(void)
356*4882a593Smuzhiyun {
357*4882a593Smuzhiyun struct pci_bus *bus;
358*4882a593Smuzhiyun
359*4882a593Smuzhiyun if (!(pci_probe & PCI_ASSIGN_ROMS))
360*4882a593Smuzhiyun list_for_each_entry(bus, &pci_root_buses, node)
361*4882a593Smuzhiyun pcibios_allocate_rom_resources(bus);
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun pci_assign_unassigned_resources();
364*4882a593Smuzhiyun pcibios_fw_addr_list_del();
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun return 0;
367*4882a593Smuzhiyun }
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun /**
370*4882a593Smuzhiyun * called in fs_initcall (one below subsys_initcall),
371*4882a593Smuzhiyun * give a chance for motherboard reserve resources
372*4882a593Smuzhiyun */
373*4882a593Smuzhiyun fs_initcall(pcibios_assign_resources);
374*4882a593Smuzhiyun
pcibios_resource_survey_bus(struct pci_bus * bus)375*4882a593Smuzhiyun void pcibios_resource_survey_bus(struct pci_bus *bus)
376*4882a593Smuzhiyun {
377*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &bus->dev, "Allocating resources\n");
378*4882a593Smuzhiyun
379*4882a593Smuzhiyun pcibios_allocate_bus_resources(bus);
380*4882a593Smuzhiyun
381*4882a593Smuzhiyun pcibios_allocate_resources(bus, 0);
382*4882a593Smuzhiyun pcibios_allocate_resources(bus, 1);
383*4882a593Smuzhiyun
384*4882a593Smuzhiyun if (!(pci_probe & PCI_ASSIGN_ROMS))
385*4882a593Smuzhiyun pcibios_allocate_rom_resources(bus);
386*4882a593Smuzhiyun }
387*4882a593Smuzhiyun
pcibios_resource_survey(void)388*4882a593Smuzhiyun void __init pcibios_resource_survey(void)
389*4882a593Smuzhiyun {
390*4882a593Smuzhiyun struct pci_bus *bus;
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun DBG("PCI: Allocating resources\n");
393*4882a593Smuzhiyun
394*4882a593Smuzhiyun list_for_each_entry(bus, &pci_root_buses, node)
395*4882a593Smuzhiyun pcibios_allocate_bus_resources(bus);
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun list_for_each_entry(bus, &pci_root_buses, node)
398*4882a593Smuzhiyun pcibios_allocate_resources(bus, 0);
399*4882a593Smuzhiyun list_for_each_entry(bus, &pci_root_buses, node)
400*4882a593Smuzhiyun pcibios_allocate_resources(bus, 1);
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun e820__reserve_resources_late();
403*4882a593Smuzhiyun /*
404*4882a593Smuzhiyun * Insert the IO APIC resources after PCI initialization has
405*4882a593Smuzhiyun * occurred to handle IO APICS that are mapped in on a BAR in
406*4882a593Smuzhiyun * PCI space, but before trying to assign unassigned pci res.
407*4882a593Smuzhiyun */
408*4882a593Smuzhiyun ioapic_insert_resources();
409*4882a593Smuzhiyun }
410