1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Intel MID PCI support
4*4882a593Smuzhiyun * Copyright (c) 2008 Intel Corporation
5*4882a593Smuzhiyun * Jesse Barnes <jesse.barnes@intel.com>
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Moorestown has an interesting PCI implementation:
8*4882a593Smuzhiyun * - configuration space is memory mapped (as defined by MCFG)
9*4882a593Smuzhiyun * - Lincroft devices also have a real, type 1 configuration space
10*4882a593Smuzhiyun * - Early Lincroft silicon has a type 1 access bug that will cause
11*4882a593Smuzhiyun * a hang if non-existent devices are accessed
12*4882a593Smuzhiyun * - some devices have the "fixed BAR" capability, which means
13*4882a593Smuzhiyun * they can't be relocated or modified; check for that during
14*4882a593Smuzhiyun * BAR sizing
15*4882a593Smuzhiyun *
16*4882a593Smuzhiyun * So, we use the MCFG space for all reads and writes, but also send
17*4882a593Smuzhiyun * Lincroft writes to type 1 space. But only read/write if the device
18*4882a593Smuzhiyun * actually exists, otherwise return all 1s for reads and bit bucket
19*4882a593Smuzhiyun * the writes.
20*4882a593Smuzhiyun */
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #include <linux/sched.h>
23*4882a593Smuzhiyun #include <linux/pci.h>
24*4882a593Smuzhiyun #include <linux/ioport.h>
25*4882a593Smuzhiyun #include <linux/init.h>
26*4882a593Smuzhiyun #include <linux/dmi.h>
27*4882a593Smuzhiyun #include <linux/acpi.h>
28*4882a593Smuzhiyun #include <linux/io.h>
29*4882a593Smuzhiyun #include <linux/smp.h>
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun #include <asm/segment.h>
32*4882a593Smuzhiyun #include <asm/pci_x86.h>
33*4882a593Smuzhiyun #include <asm/hw_irq.h>
34*4882a593Smuzhiyun #include <asm/io_apic.h>
35*4882a593Smuzhiyun #include <asm/intel-mid.h>
36*4882a593Smuzhiyun #include <asm/acpi.h>
37*4882a593Smuzhiyun
/* Offset of the first PCIe extended capability in config space */
#define PCIE_CAP_OFFSET	0x100

/* Quirks for the listed devices */
#define PCI_DEVICE_ID_INTEL_MRFLD_MMC	0x1190
#define PCI_DEVICE_ID_INTEL_MRFLD_HSU	0x1191

/* Fixed BAR fields (offsets relative to the fixed BAR vendor capability) */
#define PCIE_VNDR_CAP_ID_FIXED_BAR	0x00	/* Fixed BAR (TBD) */
#define PCI_FIXED_BAR_0_SIZE	0x04
#define PCI_FIXED_BAR_1_SIZE	0x08
#define PCI_FIXED_BAR_2_SIZE	0x0c
#define PCI_FIXED_BAR_3_SIZE	0x10
#define PCI_FIXED_BAR_4_SIZE	0x14
#define PCI_FIXED_BAR_5_SIZE	0x1c

/* Set to 1 by intel_mid_pci_init() once the MID PCI ops are installed */
static int pci_soc_mode;
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /**
56*4882a593Smuzhiyun * fixed_bar_cap - return the offset of the fixed BAR cap if found
57*4882a593Smuzhiyun * @bus: PCI bus
58*4882a593Smuzhiyun * @devfn: device in question
59*4882a593Smuzhiyun *
60*4882a593Smuzhiyun * Look for the fixed BAR cap on @bus and @devfn, returning its offset
61*4882a593Smuzhiyun * if found or 0 otherwise.
62*4882a593Smuzhiyun */
fixed_bar_cap(struct pci_bus * bus,unsigned int devfn)63*4882a593Smuzhiyun static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun int pos;
66*4882a593Smuzhiyun u32 pcie_cap = 0, cap_data;
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun pos = PCIE_CAP_OFFSET;
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun if (!raw_pci_ext_ops)
71*4882a593Smuzhiyun return 0;
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun while (pos) {
74*4882a593Smuzhiyun if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
75*4882a593Smuzhiyun devfn, pos, 4, &pcie_cap))
76*4882a593Smuzhiyun return 0;
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun if (PCI_EXT_CAP_ID(pcie_cap) == 0x0000 ||
79*4882a593Smuzhiyun PCI_EXT_CAP_ID(pcie_cap) == 0xffff)
80*4882a593Smuzhiyun break;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
83*4882a593Smuzhiyun raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
84*4882a593Smuzhiyun devfn, pos + 4, 4, &cap_data);
85*4882a593Smuzhiyun if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
86*4882a593Smuzhiyun return pos;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun pos = PCI_EXT_CAP_NEXT(pcie_cap);
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun return 0;
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun
/*
 * pci_device_update_fixed - handle a BAR write for a fixed-BAR device
 *
 * When the core sizing code writes ~0 to a BAR, answer it by writing a
 * decode mask derived from the fixed BAR size (read from the fixed BAR
 * capability at @offset) into mmconfig space, so sizing sees the real
 * size.  Any other BAR write is passed straight through to mmconfig.
 */
static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
				   int reg, int len, u32 val, int offset)
{
	unsigned int domain = pci_domain_nr(bus);
	unsigned int busnum = bus->number;
	int bar = (reg - PCI_BASE_ADDRESS_0) >> 2;
	u32 size;

	if (val == ~0 && len == 4) {
		unsigned long mask = 0;

		raw_pci_ext_ops->read(domain, busnum, devfn,
				      offset + 8 + (bar * 4), 4, &size);

		/* Turn the size into a decode pattern for the sizing code */
		if (size) {
			/* Round up to the next power of two, then mask */
			mask = size - 1;
			mask |= mask >> 1;
			mask |= mask >> 2;
			mask |= mask >> 4;
			mask |= mask >> 8;
			mask |= mask >> 16;
			mask++;
			mask = ~(mask - 1);
		}

		/*
		 * If val is all ones, the core code is trying to size the reg,
		 * so update the mmconfig space with the real size.
		 *
		 * Note: this assumes the fixed size we got is a power of two.
		 */
		return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4,
					      mask);
	}

	/* This is some other kind of BAR write, so just do it. */
	return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val);
}
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun /**
140*4882a593Smuzhiyun * type1_access_ok - check whether to use type 1
141*4882a593Smuzhiyun * @bus: bus number
142*4882a593Smuzhiyun * @devfn: device & function in question
143*4882a593Smuzhiyun *
144*4882a593Smuzhiyun * If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
145*4882a593Smuzhiyun * all, the we can go ahead with any reads & writes. If it's on a Lincroft,
146*4882a593Smuzhiyun * but doesn't exist, avoid the access altogether to keep the chip from
147*4882a593Smuzhiyun * hanging.
148*4882a593Smuzhiyun */
type1_access_ok(unsigned int bus,unsigned int devfn,int reg)149*4882a593Smuzhiyun static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun /*
152*4882a593Smuzhiyun * This is a workaround for A0 LNC bug where PCI status register does
153*4882a593Smuzhiyun * not have new CAP bit set. can not be written by SW either.
154*4882a593Smuzhiyun *
155*4882a593Smuzhiyun * PCI header type in real LNC indicates a single function device, this
156*4882a593Smuzhiyun * will prevent probing other devices under the same function in PCI
157*4882a593Smuzhiyun * shim. Therefore, use the header type in shim instead.
158*4882a593Smuzhiyun */
159*4882a593Smuzhiyun if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
160*4882a593Smuzhiyun return false;
161*4882a593Smuzhiyun if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
162*4882a593Smuzhiyun || devfn == PCI_DEVFN(0, 0)
163*4882a593Smuzhiyun || devfn == PCI_DEVFN(3, 0)))
164*4882a593Smuzhiyun return true;
165*4882a593Smuzhiyun return false; /* Langwell on others */
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun
/*
 * pci_read - config read, routed either to type 1 or mmconfig space
 *
 * Real Lincroft functions (see type1_access_ok()) are read through the
 * legacy type 1 mechanism; everything else goes through mmconfig.
 */
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	int domain = pci_domain_nr(bus);

	if (type1_access_ok(bus->number, devfn, where))
		return pci_direct_conf1.read(domain, bus->number, devfn,
					     where, size, value);

	return raw_pci_ext_ops->read(domain, bus->number, devfn,
				     where, size, value);
}
177*4882a593Smuzhiyun
/*
 * pci_write - config write with MID-specific routing
 *
 * ROM BAR writes are swallowed (MRST has no ROM BAR), fixed-BAR devices
 * get sizing-aware handling, real Lincroft functions use type 1, and
 * everything else goes to mmconfig.
 */
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	int domain = pci_domain_nr(bus);
	int cap;

	/*
	 * On MRST, there is no PCI ROM BAR, this will cause a subsequent read
	 * to ROM BAR return 0 then being ignored.
	 */
	if (where == PCI_ROM_ADDRESS)
		return 0;

	/*
	 * Devices with fixed BARs need special handling:
	 * - BAR sizing code will save, write ~0, read size, restore
	 * - so writes to fixed BARs need special handling
	 * - other writes to fixed BAR devices should go through mmconfig
	 */
	cap = fixed_bar_cap(bus, devfn);
	if (cap && where >= PCI_BASE_ADDRESS_0 && where <= PCI_BASE_ADDRESS_5)
		return pci_device_update_fixed(bus, devfn, where, size, value,
					       cap);

	/*
	 * On Moorestown update both real & mmconfig space
	 * Note: early Lincroft silicon can't handle type 1 accesses to
	 * non-existent devices, so just eat the write in that case.
	 */
	if (type1_access_ok(bus->number, devfn, where))
		return pci_direct_conf1.write(domain, bus->number, devfn,
					      where, size, value);

	return raw_pci_ext_ops->write(domain, bus->number, devfn, where,
				      size, value);
}
214*4882a593Smuzhiyun
intel_mid_pci_irq_enable(struct pci_dev * dev)215*4882a593Smuzhiyun static int intel_mid_pci_irq_enable(struct pci_dev *dev)
216*4882a593Smuzhiyun {
217*4882a593Smuzhiyun struct irq_alloc_info info;
218*4882a593Smuzhiyun int polarity;
219*4882a593Smuzhiyun int ret;
220*4882a593Smuzhiyun u8 gsi;
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun if (dev->irq_managed && dev->irq > 0)
223*4882a593Smuzhiyun return 0;
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
226*4882a593Smuzhiyun if (ret < 0) {
227*4882a593Smuzhiyun dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
228*4882a593Smuzhiyun return ret;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun switch (intel_mid_identify_cpu()) {
232*4882a593Smuzhiyun case INTEL_MID_CPU_CHIP_TANGIER:
233*4882a593Smuzhiyun polarity = IOAPIC_POL_HIGH;
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun /* Special treatment for IRQ0 */
236*4882a593Smuzhiyun if (gsi == 0) {
237*4882a593Smuzhiyun /*
238*4882a593Smuzhiyun * Skip HS UART common registers device since it has
239*4882a593Smuzhiyun * IRQ0 assigned and not used by the kernel.
240*4882a593Smuzhiyun */
241*4882a593Smuzhiyun if (dev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU)
242*4882a593Smuzhiyun return -EBUSY;
243*4882a593Smuzhiyun /*
244*4882a593Smuzhiyun * TNG has IRQ0 assigned to eMMC controller. But there
245*4882a593Smuzhiyun * are also other devices with bogus PCI configuration
246*4882a593Smuzhiyun * that have IRQ0 assigned. This check ensures that
247*4882a593Smuzhiyun * eMMC gets it. The rest of devices still could be
248*4882a593Smuzhiyun * enabled without interrupt line being allocated.
249*4882a593Smuzhiyun */
250*4882a593Smuzhiyun if (dev->device != PCI_DEVICE_ID_INTEL_MRFLD_MMC)
251*4882a593Smuzhiyun return 0;
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun break;
254*4882a593Smuzhiyun default:
255*4882a593Smuzhiyun polarity = IOAPIC_POL_LOW;
256*4882a593Smuzhiyun break;
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity);
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun /*
262*4882a593Smuzhiyun * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
263*4882a593Smuzhiyun * IOAPIC RTE entries, so we just enable RTE for the device.
264*4882a593Smuzhiyun */
265*4882a593Smuzhiyun ret = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
266*4882a593Smuzhiyun if (ret < 0)
267*4882a593Smuzhiyun return ret;
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun dev->irq = ret;
270*4882a593Smuzhiyun dev->irq_managed = 1;
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun return 0;
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun
intel_mid_pci_irq_disable(struct pci_dev * dev)275*4882a593Smuzhiyun static void intel_mid_pci_irq_disable(struct pci_dev *dev)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
278*4882a593Smuzhiyun dev->irq > 0) {
279*4882a593Smuzhiyun mp_unmap_irq(dev->irq);
280*4882a593Smuzhiyun dev->irq_managed = 0;
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun
/* MID config accessors; copied into pci_root_ops by intel_mid_pci_init() */
static const struct pci_ops intel_mid_pci_ops __initconst = {
	.read = pci_read,
	.write = pci_write,
};
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun /**
290*4882a593Smuzhiyun * intel_mid_pci_init - installs intel_mid_pci_ops
291*4882a593Smuzhiyun *
292*4882a593Smuzhiyun * Moorestown has an interesting PCI implementation (see above).
293*4882a593Smuzhiyun * Called when the early platform detection installs it.
294*4882a593Smuzhiyun */
intel_mid_pci_init(void)295*4882a593Smuzhiyun int __init intel_mid_pci_init(void)
296*4882a593Smuzhiyun {
297*4882a593Smuzhiyun pr_info("Intel MID platform detected, using MID PCI ops\n");
298*4882a593Smuzhiyun pci_mmcfg_late_init();
299*4882a593Smuzhiyun pcibios_enable_irq = intel_mid_pci_irq_enable;
300*4882a593Smuzhiyun pcibios_disable_irq = intel_mid_pci_irq_disable;
301*4882a593Smuzhiyun pci_root_ops = intel_mid_pci_ops;
302*4882a593Smuzhiyun pci_soc_mode = 1;
303*4882a593Smuzhiyun /* Continue with standard init */
304*4882a593Smuzhiyun acpi_noirq_set();
305*4882a593Smuzhiyun return 1;
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun /*
309*4882a593Smuzhiyun * Langwell devices are not true PCI devices; they are not subject to 10 ms
310*4882a593Smuzhiyun * d3 to d0 delay required by PCI spec.
311*4882a593Smuzhiyun */
pci_d3delay_fixup(struct pci_dev * dev)312*4882a593Smuzhiyun static void pci_d3delay_fixup(struct pci_dev *dev)
313*4882a593Smuzhiyun {
314*4882a593Smuzhiyun /*
315*4882a593Smuzhiyun * PCI fixups are effectively decided compile time. If we have a dual
316*4882a593Smuzhiyun * SoC/non-SoC kernel we don't want to mangle d3 on non-SoC devices.
317*4882a593Smuzhiyun */
318*4882a593Smuzhiyun if (!pci_soc_mode)
319*4882a593Smuzhiyun return;
320*4882a593Smuzhiyun /*
321*4882a593Smuzhiyun * True PCI devices in Lincroft should allow type 1 access, the rest
322*4882a593Smuzhiyun * are Langwell fake PCI devices.
323*4882a593Smuzhiyun */
324*4882a593Smuzhiyun if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
325*4882a593Smuzhiyun return;
326*4882a593Smuzhiyun dev->d3hot_delay = 0;
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
329*4882a593Smuzhiyun
/*
 * mid_power_off_one_device - request D3hot for one MID device
 *
 * Syncs dev->current_state from PMCSR before calling
 * pci_set_power_state(), so the core doesn't force D0 first.
 * Assumes dev->pm_cap is valid -- TODO confirm all matched devices
 * expose a PM capability.
 */
static void mid_power_off_one_device(struct pci_dev *dev)
{
	u16 pmcsr;

	/*
	 * Update current state first, otherwise PCI core enforces PCI_D0 in
	 * pci_set_power_state() for devices which status was PCI_UNKNOWN.
	 */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pci_power_t __force)(pmcsr & PCI_PM_CTRL_STATE_MASK);

	pci_set_power_state(dev, PCI_D3hot);
}
343*4882a593Smuzhiyun
mid_power_off_devices(struct pci_dev * dev)344*4882a593Smuzhiyun static void mid_power_off_devices(struct pci_dev *dev)
345*4882a593Smuzhiyun {
346*4882a593Smuzhiyun int id;
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun if (!pci_soc_mode)
349*4882a593Smuzhiyun return;
350*4882a593Smuzhiyun
351*4882a593Smuzhiyun id = intel_mid_pwr_get_lss_id(dev);
352*4882a593Smuzhiyun if (id < 0)
353*4882a593Smuzhiyun return;
354*4882a593Smuzhiyun
355*4882a593Smuzhiyun /*
356*4882a593Smuzhiyun * This sets only PMCSR bits. The actual power off will happen in
357*4882a593Smuzhiyun * arch/x86/platform/intel-mid/pwr.c.
358*4882a593Smuzhiyun */
359*4882a593Smuzhiyun mid_power_off_one_device(dev);
360*4882a593Smuzhiyun }
361*4882a593Smuzhiyun
362*4882a593Smuzhiyun DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, mid_power_off_devices);
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun /*
365*4882a593Smuzhiyun * Langwell devices reside at fixed offsets, don't try to move them.
366*4882a593Smuzhiyun */
pci_fixed_bar_fixup(struct pci_dev * dev)367*4882a593Smuzhiyun static void pci_fixed_bar_fixup(struct pci_dev *dev)
368*4882a593Smuzhiyun {
369*4882a593Smuzhiyun unsigned long offset;
370*4882a593Smuzhiyun u32 size;
371*4882a593Smuzhiyun int i;
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun if (!pci_soc_mode)
374*4882a593Smuzhiyun return;
375*4882a593Smuzhiyun
376*4882a593Smuzhiyun /* Must have extended configuration space */
377*4882a593Smuzhiyun if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
378*4882a593Smuzhiyun return;
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun /* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
381*4882a593Smuzhiyun offset = fixed_bar_cap(dev->bus, dev->devfn);
382*4882a593Smuzhiyun if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
383*4882a593Smuzhiyun PCI_DEVFN(2, 2) == dev->devfn)
384*4882a593Smuzhiyun return;
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun for (i = 0; i < PCI_STD_NUM_BARS; i++) {
387*4882a593Smuzhiyun pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
388*4882a593Smuzhiyun dev->resource[i].end = dev->resource[i].start + size - 1;
389*4882a593Smuzhiyun dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun }
392*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup);
393