1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Support routines for initializing a PCI subsystem
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Extruded from code written by
6*4882a593Smuzhiyun * Dave Rusling (david.rusling@reo.mts.dec.com)
7*4882a593Smuzhiyun * David Mosberger (davidm@cs.arizona.edu)
8*4882a593Smuzhiyun * David Miller (davem@redhat.com)
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * Fixed for multiple PCI buses, 1999 Andrea Arcangeli <andrea@suse.de>
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
13*4882a593Smuzhiyun * Resource sorting
14*4882a593Smuzhiyun */
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #include <linux/kernel.h>
17*4882a593Smuzhiyun #include <linux/export.h>
18*4882a593Smuzhiyun #include <linux/pci.h>
19*4882a593Smuzhiyun #include <linux/errno.h>
20*4882a593Smuzhiyun #include <linux/ioport.h>
21*4882a593Smuzhiyun #include <linux/cache.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include "pci.h"
24*4882a593Smuzhiyun
pci_std_update_resource(struct pci_dev * dev,int resno)25*4882a593Smuzhiyun static void pci_std_update_resource(struct pci_dev *dev, int resno)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun struct pci_bus_region region;
28*4882a593Smuzhiyun bool disable;
29*4882a593Smuzhiyun u16 cmd;
30*4882a593Smuzhiyun u32 new, check, mask;
31*4882a593Smuzhiyun int reg;
32*4882a593Smuzhiyun struct resource *res = dev->resource + resno;
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
35*4882a593Smuzhiyun if (dev->is_virtfn)
36*4882a593Smuzhiyun return;
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun /*
39*4882a593Smuzhiyun * Ignore resources for unimplemented BARs and unused resource slots
40*4882a593Smuzhiyun * for 64 bit BARs.
41*4882a593Smuzhiyun */
42*4882a593Smuzhiyun if (!res->flags)
43*4882a593Smuzhiyun return;
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun if (res->flags & IORESOURCE_UNSET)
46*4882a593Smuzhiyun return;
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun /*
49*4882a593Smuzhiyun * Ignore non-moveable resources. This might be legacy resources for
50*4882a593Smuzhiyun * which no functional BAR register exists or another important
51*4882a593Smuzhiyun * system resource we shouldn't move around.
52*4882a593Smuzhiyun */
53*4882a593Smuzhiyun if (res->flags & IORESOURCE_PCI_FIXED)
54*4882a593Smuzhiyun return;
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun pcibios_resource_to_bus(dev->bus, ®ion, res);
57*4882a593Smuzhiyun new = region.start;
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun if (res->flags & IORESOURCE_IO) {
60*4882a593Smuzhiyun mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
61*4882a593Smuzhiyun new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
62*4882a593Smuzhiyun } else if (resno == PCI_ROM_RESOURCE) {
63*4882a593Smuzhiyun mask = PCI_ROM_ADDRESS_MASK;
64*4882a593Smuzhiyun } else {
65*4882a593Smuzhiyun mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
66*4882a593Smuzhiyun new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun if (resno < PCI_ROM_RESOURCE) {
70*4882a593Smuzhiyun reg = PCI_BASE_ADDRESS_0 + 4 * resno;
71*4882a593Smuzhiyun } else if (resno == PCI_ROM_RESOURCE) {
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun /*
74*4882a593Smuzhiyun * Apparently some Matrox devices have ROM BARs that read
75*4882a593Smuzhiyun * as zero when disabled, so don't update ROM BARs unless
76*4882a593Smuzhiyun * they're enabled. See
77*4882a593Smuzhiyun * https://lore.kernel.org/r/43147B3D.1030309@vc.cvut.cz/
78*4882a593Smuzhiyun */
79*4882a593Smuzhiyun if (!(res->flags & IORESOURCE_ROM_ENABLE))
80*4882a593Smuzhiyun return;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun reg = dev->rom_base_reg;
83*4882a593Smuzhiyun new |= PCI_ROM_ADDRESS_ENABLE;
84*4882a593Smuzhiyun } else
85*4882a593Smuzhiyun return;
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun /*
88*4882a593Smuzhiyun * We can't update a 64-bit BAR atomically, so when possible,
89*4882a593Smuzhiyun * disable decoding so that a half-updated BAR won't conflict
90*4882a593Smuzhiyun * with another device.
91*4882a593Smuzhiyun */
92*4882a593Smuzhiyun disable = (res->flags & IORESOURCE_MEM_64) && !dev->mmio_always_on;
93*4882a593Smuzhiyun if (disable) {
94*4882a593Smuzhiyun pci_read_config_word(dev, PCI_COMMAND, &cmd);
95*4882a593Smuzhiyun pci_write_config_word(dev, PCI_COMMAND,
96*4882a593Smuzhiyun cmd & ~PCI_COMMAND_MEMORY);
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun pci_write_config_dword(dev, reg, new);
100*4882a593Smuzhiyun pci_read_config_dword(dev, reg, &check);
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun if ((new ^ check) & mask) {
103*4882a593Smuzhiyun pci_err(dev, "BAR %d: error updating (%#08x != %#08x)\n",
104*4882a593Smuzhiyun resno, new, check);
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun if (res->flags & IORESOURCE_MEM_64) {
108*4882a593Smuzhiyun new = region.start >> 16 >> 16;
109*4882a593Smuzhiyun pci_write_config_dword(dev, reg + 4, new);
110*4882a593Smuzhiyun pci_read_config_dword(dev, reg + 4, &check);
111*4882a593Smuzhiyun if (check != new) {
112*4882a593Smuzhiyun pci_err(dev, "BAR %d: error updating (high %#08x != %#08x)\n",
113*4882a593Smuzhiyun resno, new, check);
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun if (disable)
118*4882a593Smuzhiyun pci_write_config_word(dev, PCI_COMMAND, cmd);
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun
pci_update_resource(struct pci_dev * dev,int resno)121*4882a593Smuzhiyun void pci_update_resource(struct pci_dev *dev, int resno)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun if (resno <= PCI_ROM_RESOURCE)
124*4882a593Smuzhiyun pci_std_update_resource(dev, resno);
125*4882a593Smuzhiyun #ifdef CONFIG_PCI_IOV
126*4882a593Smuzhiyun else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
127*4882a593Smuzhiyun pci_iov_update_resource(dev, resno);
128*4882a593Smuzhiyun #endif
129*4882a593Smuzhiyun }
130*4882a593Smuzhiyun
pci_claim_resource(struct pci_dev * dev,int resource)131*4882a593Smuzhiyun int pci_claim_resource(struct pci_dev *dev, int resource)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun struct resource *res = &dev->resource[resource];
134*4882a593Smuzhiyun struct resource *root, *conflict;
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun if (res->flags & IORESOURCE_UNSET) {
137*4882a593Smuzhiyun pci_info(dev, "can't claim BAR %d %pR: no address assigned\n",
138*4882a593Smuzhiyun resource, res);
139*4882a593Smuzhiyun return -EINVAL;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun /*
143*4882a593Smuzhiyun * If we have a shadow copy in RAM, the PCI device doesn't respond
144*4882a593Smuzhiyun * to the shadow range, so we don't need to claim it, and upstream
145*4882a593Smuzhiyun * bridges don't need to route the range to the device.
146*4882a593Smuzhiyun */
147*4882a593Smuzhiyun if (res->flags & IORESOURCE_ROM_SHADOW)
148*4882a593Smuzhiyun return 0;
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun root = pci_find_parent_resource(dev, res);
151*4882a593Smuzhiyun if (!root) {
152*4882a593Smuzhiyun pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n",
153*4882a593Smuzhiyun resource, res);
154*4882a593Smuzhiyun res->flags |= IORESOURCE_UNSET;
155*4882a593Smuzhiyun return -EINVAL;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun conflict = request_resource_conflict(root, res);
159*4882a593Smuzhiyun if (conflict) {
160*4882a593Smuzhiyun pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
161*4882a593Smuzhiyun resource, res, conflict->name, conflict);
162*4882a593Smuzhiyun res->flags |= IORESOURCE_UNSET;
163*4882a593Smuzhiyun return -EBUSY;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun return 0;
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun EXPORT_SYMBOL(pci_claim_resource);
169*4882a593Smuzhiyun
/*
 * pci_disable_bridge_window - turn off a bridge's downstream memory windows
 * @dev: PCI-to-PCI bridge device
 *
 * Writes window registers so that no address falls inside either memory
 * window (NOTE(review): presumably by programming a base above the limit —
 * base 0xfff0 vs limit 0x0000 — confirm against the bridge config layout).
 */
void pci_disable_bridge_window(struct pci_dev *dev)
{
	/* MMIO Base/Limit */
	pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);

	/* Prefetchable MMIO Base/Limit */
	pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
	pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0);
	pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
}
180*4882a593Smuzhiyun
/*
 * Generic function that returns a value indicating that the device's
 * original BIOS BAR address was not saved and so is not available for
 * reinstatement.
 *
 * Can be over-ridden by architecture specific code that implements
 * reinstatement functionality rather than leaving it disabled when
 * normal allocation attempts fail.
 */
resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
{
	/* 0 means "no firmware address known"; see pci_revert_fw_address(). */
	return 0;
}
194*4882a593Smuzhiyun
/*
 * pci_revert_fw_address - fall back to the firmware-assigned BAR address
 * @res: the resource to (re)position
 * @dev: owning PCI device
 * @resno: resource index
 * @size: required size of the resource
 *
 * Try to place @res at the address the firmware originally programmed.
 * Returns 0 on success; -ENOMEM if no firmware address is known, -ENXIO
 * if the device sits behind a bridge with no suitable window, -EBUSY on
 * an address conflict (in which case the old range is restored).
 */
static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
		int resno, resource_size_t size)
{
	struct resource *parent, *overlap;
	resource_size_t fw_addr, old_start, old_end;

	fw_addr = pcibios_retrieve_fw_addr(dev, resno);
	if (!fw_addr)
		return -ENOMEM;

	/* Remember the current range so we can roll back on conflict. */
	old_start = res->start;
	old_end = res->end;
	res->start = fw_addr;
	res->end = res->start + size - 1;
	res->flags &= ~IORESOURCE_UNSET;

	parent = pci_find_parent_resource(dev, res);
	if (!parent) {
		/*
		 * If dev is behind a bridge, accesses will only reach it
		 * if res is inside the relevant bridge window.
		 */
		if (pci_upstream_bridge(dev))
			return -ENXIO;

		/*
		 * On the root bus, assume the host bridge will forward
		 * everything.
		 */
		parent = (res->flags & IORESOURCE_IO) ? &ioport_resource
						      : &iomem_resource;
	}

	pci_info(dev, "BAR %d: trying firmware assignment %pR\n",
		 resno, res);
	overlap = request_resource_conflict(parent, res);
	if (overlap) {
		pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n",
			 resno, res, overlap->name, overlap);
		res->start = old_start;
		res->end = old_end;
		res->flags |= IORESOURCE_UNSET;
		return -EBUSY;
	}
	return 0;
}
243*4882a593Smuzhiyun
/*
 * We don't have to worry about legacy ISA devices, so nothing to do here.
 * This is marked as __weak because multiple architectures define it; it should
 * eventually go away.
 */
resource_size_t __weak pcibios_align_resource(void *data,
					      const struct resource *res,
					      resource_size_t size,
					      resource_size_t align)
{
	/* Default: accept the candidate start address unchanged. */
	return res->start;
}
256*4882a593Smuzhiyun
/*
 * __pci_assign_resource - allocate space for one BAR on a given bus
 * @bus: bus whose windows to search
 * @dev: owning device
 * @resno: resource index
 * @size: required size
 * @align: required alignment
 *
 * Attempts allocation with progressively looser type matching:
 * exact prefetch/64-bit match first, then a 32-bit prefetchable window
 * for a 64-bit prefetchable resource, then any non-prefetchable window.
 * Returns 0 on success or the last allocator error.
 */
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
		int resno, resource_size_t size, resource_size_t align)
{
	struct resource *res = dev->resource + resno;
	resource_size_t lowest;
	int err;

	lowest = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO
					      : PCIBIOS_MIN_MEM;

	/*
	 * First, try exact prefetching match.  Even if a 64-bit
	 * prefetchable bridge window is below 4GB, we can't put a 32-bit
	 * prefetchable resource in it because pbus_size_mem() assumes a
	 * 64-bit window will contain no 32-bit resources.  If we assign
	 * things differently than they were sized, not everything will fit.
	 */
	err = pci_bus_alloc_resource(bus, res, size, align, lowest,
				     IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
				     pcibios_align_resource, dev);
	if (!err)
		return 0;

	/*
	 * If the prefetchable window is only 32 bits wide, we can put
	 * 64-bit prefetchable resources in it.
	 */
	if ((res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) ==
	    (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) {
		err = pci_bus_alloc_resource(bus, res, size, align, lowest,
					     IORESOURCE_PREFETCH,
					     pcibios_align_resource, dev);
		if (!err)
			return 0;
	}

	/*
	 * If we didn't find a better match, we can put any memory resource
	 * in a non-prefetchable window.  If this resource is 32 bits and
	 * non-prefetchable, the first call already tried the only possibility
	 * so we don't need to try again.
	 */
	if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64))
		err = pci_bus_alloc_resource(bus, res, size, align, lowest, 0,
					     pcibios_align_resource, dev);

	return err;
}
304*4882a593Smuzhiyun
/*
 * Try to assign the resource on the device's own bus, then walk up
 * through transparent bridges retrying on each parent bus until the
 * allocation succeeds or no transparent parent remains.
 */
static int _pci_assign_resource(struct pci_dev *dev, int resno,
		resource_size_t size, resource_size_t min_align)
{
	struct pci_bus *bus = dev->bus;
	int ret;

	for (;;) {
		ret = __pci_assign_resource(bus, dev, resno, size, min_align);
		if (!ret)
			return 0;
		if (!bus->parent || !bus->self->transparent)
			return ret;
		bus = bus->parent;
	}
}
320*4882a593Smuzhiyun
/*
 * pci_assign_resource - allocate and program an address for a BAR
 * @dev: PCI device
 * @resno: resource index
 *
 * Finds space for dev->resource[resno], falling back to the firmware
 * address on failure, and writes the result to the device's BAR register
 * (for non-bridge resources).  Returns 0 on success or a negative errno.
 */
int pci_assign_resource(struct pci_dev *dev, int resno)
{
	struct resource *res = dev->resource + resno;
	resource_size_t align, size;
	int ret;

	/* Immovable resources keep whatever address they already have. */
	if (res->flags & IORESOURCE_PCI_FIXED)
		return 0;

	/* Mark unset until an address is successfully assigned below. */
	res->flags |= IORESOURCE_UNSET;
	align = pci_resource_alignment(dev, res);
	if (!align) {
		pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n",
			 resno, res);
		return -EINVAL;
	}

	size = resource_size(res);
	ret = _pci_assign_resource(dev, resno, size, align);

	/*
	 * If we failed to assign anything, let's try the address
	 * where firmware left it.  That at least has a chance of
	 * working, which is better than just leaving it disabled.
	 */
	if (ret < 0) {
		pci_info(dev, "BAR %d: no space for %pR\n", resno, res);
		ret = pci_revert_fw_address(res, dev, resno, size);
	}

	if (ret < 0) {
		pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res);
		return ret;
	}

	res->flags &= ~IORESOURCE_UNSET;
	res->flags &= ~IORESOURCE_STARTALIGN;
	pci_info(dev, "BAR %d: assigned %pR\n", resno, res);
	/* Only device BARs have hardware registers to update. */
	if (resno < PCI_BRIDGE_RESOURCES)
		pci_update_resource(dev, resno);

	return 0;
}
EXPORT_SYMBOL(pci_assign_resource);
365*4882a593Smuzhiyun
/*
 * pci_reassign_resource - grow an already-assigned resource by @addsize
 * @dev: PCI device
 * @resno: resource index
 * @addsize: extra size to add to the current size
 * @min_align: minimum required alignment (caller guarantees compatibility)
 *
 * Returns 0 on success or a negative errno; on failure the resource's
 * flags are left unchanged.
 *
 * Fix: the original set IORESOURCE_UNSET before checking res->parent and
 * then returned -EINVAL without restoring the flags, unlike the other
 * failure path which rolls flags back.  Check for an unassigned resource
 * before mutating the flags so every error path leaves them untouched.
 */
int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
			resource_size_t min_align)
{
	struct resource *res = dev->resource + resno;
	unsigned long flags;
	resource_size_t new_size;
	int ret;

	/* Immovable resources keep whatever address they already have. */
	if (res->flags & IORESOURCE_PCI_FIXED)
		return 0;

	/* Only a resource that is already in the tree can be expanded. */
	if (!res->parent) {
		pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n",
			 resno, res);
		return -EINVAL;
	}

	flags = res->flags;
	res->flags |= IORESOURCE_UNSET;

	/* already aligned with min_align */
	new_size = resource_size(res) + addsize;
	ret = _pci_assign_resource(dev, resno, new_size, min_align);
	if (ret) {
		/* Roll back so callers see the original, still-valid flags. */
		res->flags = flags;
		pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n",
			 resno, res, (unsigned long long) addsize);
		return ret;
	}

	res->flags &= ~IORESOURCE_UNSET;
	res->flags &= ~IORESOURCE_STARTALIGN;
	pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
		 resno, res, (unsigned long long) addsize);
	/* Only device BARs have hardware registers to update. */
	if (resno < PCI_BRIDGE_RESOURCES)
		pci_update_resource(dev, resno);

	return 0;
}
404*4882a593Smuzhiyun
/*
 * pci_release_resource - remove a BAR's resource from the resource tree
 * @dev: PCI device
 * @resno: resource index
 *
 * After release, the resource is rebased to [0, size - 1] and marked
 * IORESOURCE_UNSET so it can be reassigned later.
 */
void pci_release_resource(struct pci_dev *dev, int resno)
{
	struct resource *res = dev->resource + resno;

	pci_info(dev, "BAR %d: releasing %pR\n", resno, res);

	/* Nothing to do if the resource was never inserted into a tree. */
	if (!res->parent)
		return;

	release_resource(res);
	/*
	 * Compute the new end BEFORE clearing res->start:
	 * resource_size() reads both start and end.
	 */
	res->end = resource_size(res) - 1;
	res->start = 0;
	res->flags |= IORESOURCE_UNSET;
}
EXPORT_SYMBOL(pci_release_resource);
420*4882a593Smuzhiyun
/*
 * pci_resize_resource - resize a BAR via the Resizable BAR capability
 * @dev: PCI device
 * @resno: resource index
 * @size: new size, encoded as a bit index (checked against the
 *        pci_rebar_get_possible_sizes() bitmask)
 *
 * Returns 0 on success; -ENOTSUPP if firmware config must be preserved
 * or the BAR is not resizable, -EBUSY if the BAR is assigned or memory
 * decoding is on, -EINVAL for an unsupported size, or the error from
 * reassigning bridge resources (after rolling back to the old size).
 */
int pci_resize_resource(struct pci_dev *dev, int resno, int size)
{
	struct resource *res = dev->resource + resno;
	struct pci_host_bridge *host;
	int old, ret;
	u32 sizes;
	u16 cmd;

	/* Check if we must preserve the firmware's resource assignment */
	host = pci_find_host_bridge(dev->bus);
	if (host->preserve_config)
		return -ENOTSUPP;

	/* Make sure the resource isn't assigned before resizing it. */
	if (!(res->flags & IORESOURCE_UNSET))
		return -EBUSY;

	/* Refuse while the device is actively decoding memory accesses. */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_MEMORY)
		return -EBUSY;

	sizes = pci_rebar_get_possible_sizes(dev, resno);
	if (!sizes)
		return -ENOTSUPP;

	/* The requested size must be one the hardware advertises. */
	if (!(sizes & BIT(size)))
		return -EINVAL;

	/* Remember the current size so we can roll back on failure. */
	old = pci_rebar_get_current_size(dev, resno);
	if (old < 0)
		return old;

	ret = pci_rebar_set_size(dev, resno, size);
	if (ret)
		return ret;

	res->end = res->start + pci_rebar_size_to_bytes(size) - 1;

	/* Check if the new config works by trying to assign everything. */
	if (dev->bus->self) {
		ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
		if (ret)
			goto error_resize;
	}
	return 0;

error_resize:
	/* Restore the previous BAR size in hardware and in the resource. */
	pci_rebar_set_size(dev, resno, old);
	res->end = res->start + pci_rebar_size_to_bytes(old) - 1;
	return ret;
}
EXPORT_SYMBOL(pci_resize_resource);
473*4882a593Smuzhiyun
pci_enable_resources(struct pci_dev * dev,int mask)474*4882a593Smuzhiyun int pci_enable_resources(struct pci_dev *dev, int mask)
475*4882a593Smuzhiyun {
476*4882a593Smuzhiyun u16 cmd, old_cmd;
477*4882a593Smuzhiyun int i;
478*4882a593Smuzhiyun struct resource *r;
479*4882a593Smuzhiyun
480*4882a593Smuzhiyun pci_read_config_word(dev, PCI_COMMAND, &cmd);
481*4882a593Smuzhiyun old_cmd = cmd;
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun for (i = 0; i < PCI_NUM_RESOURCES; i++) {
484*4882a593Smuzhiyun if (!(mask & (1 << i)))
485*4882a593Smuzhiyun continue;
486*4882a593Smuzhiyun
487*4882a593Smuzhiyun r = &dev->resource[i];
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
490*4882a593Smuzhiyun continue;
491*4882a593Smuzhiyun if ((i == PCI_ROM_RESOURCE) &&
492*4882a593Smuzhiyun (!(r->flags & IORESOURCE_ROM_ENABLE)))
493*4882a593Smuzhiyun continue;
494*4882a593Smuzhiyun
495*4882a593Smuzhiyun if (r->flags & IORESOURCE_UNSET) {
496*4882a593Smuzhiyun pci_err(dev, "can't enable device: BAR %d %pR not assigned\n",
497*4882a593Smuzhiyun i, r);
498*4882a593Smuzhiyun return -EINVAL;
499*4882a593Smuzhiyun }
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun if (!r->parent) {
502*4882a593Smuzhiyun pci_err(dev, "can't enable device: BAR %d %pR not claimed\n",
503*4882a593Smuzhiyun i, r);
504*4882a593Smuzhiyun return -EINVAL;
505*4882a593Smuzhiyun }
506*4882a593Smuzhiyun
507*4882a593Smuzhiyun if (r->flags & IORESOURCE_IO)
508*4882a593Smuzhiyun cmd |= PCI_COMMAND_IO;
509*4882a593Smuzhiyun if (r->flags & IORESOURCE_MEM)
510*4882a593Smuzhiyun cmd |= PCI_COMMAND_MEMORY;
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun if (cmd != old_cmd) {
514*4882a593Smuzhiyun pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
515*4882a593Smuzhiyun pci_write_config_word(dev, PCI_COMMAND, cmd);
516*4882a593Smuzhiyun }
517*4882a593Smuzhiyun return 0;
518*4882a593Smuzhiyun }
519