1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #include <linux/delay.h>
7*4882a593Smuzhiyun #include <linux/dmi.h>
8*4882a593Smuzhiyun #include <linux/pci.h>
9*4882a593Smuzhiyun #include <linux/vgaarb.h>
10*4882a593Smuzhiyun #include <asm/hpet.h>
11*4882a593Smuzhiyun #include <asm/pci_x86.h>
12*4882a593Smuzhiyun
pci_fixup_i450nx(struct pci_dev * d)13*4882a593Smuzhiyun static void pci_fixup_i450nx(struct pci_dev *d)
14*4882a593Smuzhiyun {
15*4882a593Smuzhiyun /*
16*4882a593Smuzhiyun * i450NX -- Find and scan all secondary buses on all PXB's.
17*4882a593Smuzhiyun */
18*4882a593Smuzhiyun int pxb, reg;
19*4882a593Smuzhiyun u8 busno, suba, subb;
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun dev_warn(&d->dev, "Searching for i450NX host bridges\n");
22*4882a593Smuzhiyun reg = 0xd0;
23*4882a593Smuzhiyun for(pxb = 0; pxb < 2; pxb++) {
24*4882a593Smuzhiyun pci_read_config_byte(d, reg++, &busno);
25*4882a593Smuzhiyun pci_read_config_byte(d, reg++, &suba);
26*4882a593Smuzhiyun pci_read_config_byte(d, reg++, &subb);
27*4882a593Smuzhiyun dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno,
28*4882a593Smuzhiyun suba, subb);
29*4882a593Smuzhiyun if (busno)
30*4882a593Smuzhiyun pcibios_scan_root(busno); /* Bus A */
31*4882a593Smuzhiyun if (suba < subb)
32*4882a593Smuzhiyun pcibios_scan_root(suba+1); /* Bus B */
33*4882a593Smuzhiyun }
34*4882a593Smuzhiyun pcibios_last_bus = -1;
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx);
37*4882a593Smuzhiyun
pci_fixup_i450gx(struct pci_dev * d)38*4882a593Smuzhiyun static void pci_fixup_i450gx(struct pci_dev *d)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun /*
41*4882a593Smuzhiyun * i450GX and i450KX -- Find and scan all secondary buses.
42*4882a593Smuzhiyun * (called separately for each PCI bridge found)
43*4882a593Smuzhiyun */
44*4882a593Smuzhiyun u8 busno;
45*4882a593Smuzhiyun pci_read_config_byte(d, 0x4a, &busno);
46*4882a593Smuzhiyun dev_info(&d->dev, "i440KX/GX host bridge; secondary bus %02x\n", busno);
47*4882a593Smuzhiyun pcibios_scan_root(busno);
48*4882a593Smuzhiyun pcibios_last_bus = -1;
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx);
51*4882a593Smuzhiyun
pci_fixup_umc_ide(struct pci_dev * d)52*4882a593Smuzhiyun static void pci_fixup_umc_ide(struct pci_dev *d)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun /*
55*4882a593Smuzhiyun * UM8886BF IDE controller sets region type bits incorrectly,
56*4882a593Smuzhiyun * therefore they look like memory despite of them being I/O.
57*4882a593Smuzhiyun */
58*4882a593Smuzhiyun int i;
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun dev_warn(&d->dev, "Fixing base address flags\n");
61*4882a593Smuzhiyun for(i = 0; i < 4; i++)
62*4882a593Smuzhiyun d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
65*4882a593Smuzhiyun
/* Cap the PCI latency timer on SiS 5597/5598 chipsets. */
static void pci_fixup_latency(struct pci_dev *d)
{
	/*
	 * SiS 5597 and 5598 chipsets require latency timer set to
	 * at most 32 to avoid lockups.
	 */
	dev_dbg(&d->dev, "Setting max latency to 32\n");
	/* NOTE(review): global limit — presumably consumed when pcibios
	 * programs device latency timers; confirm against pcibios code. */
	pcibios_max_latency = 32;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency);
77*4882a593Smuzhiyun
/* The PIIX4 ACPI function's interrupt is hardwired; record it directly. */
static void pci_fixup_piix4_acpi(struct pci_dev *d)
{
	/*
	 * PIIX4 ACPI device: hardwired IRQ9
	 */
	d->irq = 9;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi);
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun /*
88*4882a593Smuzhiyun * Addresses issues with problems in the memory write queue timer in
89*4882a593Smuzhiyun * certain VIA Northbridges. This bugfix is per VIA's specifications,
90*4882a593Smuzhiyun * except for the KL133/KM133: clearing bit 5 on those Northbridges seems
91*4882a593Smuzhiyun * to trigger a bug in its integrated ProSavage video card, which
92*4882a593Smuzhiyun * causes screen corruption. We only clear bits 6 and 7 for that chipset,
93*4882a593Smuzhiyun * until VIA can provide us with definitive information on why screen
94*4882a593Smuzhiyun * corruption occurs, and what exactly those bits do.
95*4882a593Smuzhiyun *
96*4882a593Smuzhiyun * VIA 8363,8622,8361 Northbridges:
97*4882a593Smuzhiyun * - bits 5, 6, 7 at offset 0x55 need to be turned off
98*4882a593Smuzhiyun * VIA 8367 (KT266x) Northbridges:
99*4882a593Smuzhiyun * - bits 5, 6, 7 at offset 0x95 need to be turned off
100*4882a593Smuzhiyun * VIA 8363 rev 0x81/0x84 (KL133/KM133) Northbridges:
101*4882a593Smuzhiyun * - bits 6, 7 at offset 0x55 need to be turned off
102*4882a593Smuzhiyun */
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun #define VIA_8363_KL133_REVISION_ID 0x81
105*4882a593Smuzhiyun #define VIA_8363_KM133_REVISION_ID 0x84
106*4882a593Smuzhiyun
pci_fixup_via_northbridge_bug(struct pci_dev * d)107*4882a593Smuzhiyun static void pci_fixup_via_northbridge_bug(struct pci_dev *d)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun u8 v;
110*4882a593Smuzhiyun int where = 0x55;
111*4882a593Smuzhiyun int mask = 0x1f; /* clear bits 5, 6, 7 by default */
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun if (d->device == PCI_DEVICE_ID_VIA_8367_0) {
114*4882a593Smuzhiyun /* fix pci bus latency issues resulted by NB bios error
115*4882a593Smuzhiyun it appears on bug free^Wreduced kt266x's bios forces
116*4882a593Smuzhiyun NB latency to zero */
117*4882a593Smuzhiyun pci_write_config_byte(d, PCI_LATENCY_TIMER, 0);
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun where = 0x95; /* the memory write queue timer register is
120*4882a593Smuzhiyun different for the KT266x's: 0x95 not 0x55 */
121*4882a593Smuzhiyun } else if (d->device == PCI_DEVICE_ID_VIA_8363_0 &&
122*4882a593Smuzhiyun (d->revision == VIA_8363_KL133_REVISION_ID ||
123*4882a593Smuzhiyun d->revision == VIA_8363_KM133_REVISION_ID)) {
124*4882a593Smuzhiyun mask = 0x3f; /* clear only bits 6 and 7; clearing bit 5
125*4882a593Smuzhiyun causes screen corruption on the KL133/KM133 */
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun pci_read_config_byte(d, where, &v);
129*4882a593Smuzhiyun if (v & ~mask) {
130*4882a593Smuzhiyun dev_warn(&d->dev, "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n", \
131*4882a593Smuzhiyun d->device, d->revision, where, v, mask, v & mask);
132*4882a593Smuzhiyun v &= mask;
133*4882a593Smuzhiyun pci_write_config_byte(d, where, v);
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
137*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
138*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
139*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
140*4882a593Smuzhiyun DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
141*4882a593Smuzhiyun DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
142*4882a593Smuzhiyun DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
143*4882a593Smuzhiyun DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun /*
146*4882a593Smuzhiyun * For some reasons Intel decided that certain parts of their
147*4882a593Smuzhiyun * 815, 845 and some other chipsets must look like PCI-to-PCI bridges
148*4882a593Smuzhiyun * while they are obviously not. The 82801 family (AA, AB, BAM/CAM,
149*4882a593Smuzhiyun * BA/CA/DB and E) PCI bridges are actually HUB-to-PCI ones, according
150*4882a593Smuzhiyun * to Intel terminology. These devices do forward all addresses from
151*4882a593Smuzhiyun * system to PCI bus no matter what are their window settings, so they are
152*4882a593Smuzhiyun * "transparent" (or subtractive decoding) from programmers point of view.
153*4882a593Smuzhiyun */
pci_fixup_transparent_bridge(struct pci_dev * dev)154*4882a593Smuzhiyun static void pci_fixup_transparent_bridge(struct pci_dev *dev)
155*4882a593Smuzhiyun {
156*4882a593Smuzhiyun if ((dev->device & 0xff00) == 0x2400)
157*4882a593Smuzhiyun dev->transparent = 1;
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
160*4882a593Smuzhiyun PCI_CLASS_BRIDGE_PCI, 8, pci_fixup_transparent_bridge);
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun /*
163*4882a593Smuzhiyun * Fixup for C1 Halt Disconnect problem on nForce2 systems.
164*4882a593Smuzhiyun *
165*4882a593Smuzhiyun * From information provided by "Allen Martin" <AMartin@nvidia.com>:
166*4882a593Smuzhiyun *
167*4882a593Smuzhiyun * A hang is caused when the CPU generates a very fast CONNECT/HALT cycle
168*4882a593Smuzhiyun * sequence. Workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns.
169*4882a593Smuzhiyun * This allows the state-machine and timer to return to a proper state within
170*4882a593Smuzhiyun * 80 ns of the CONNECT and probe appearing together. Since the CPU will not
171*4882a593Smuzhiyun * issue another HALT within 80 ns of the initial HALT, the failure condition
172*4882a593Smuzhiyun * is avoided.
173*4882a593Smuzhiyun */
pci_fixup_nforce2(struct pci_dev * dev)174*4882a593Smuzhiyun static void pci_fixup_nforce2(struct pci_dev *dev)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun u32 val;
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /*
179*4882a593Smuzhiyun * Chip Old value New value
180*4882a593Smuzhiyun * C17 0x1F0FFF01 0x1F01FF01
181*4882a593Smuzhiyun * C18D 0x9F0FFF01 0x9F01FF01
182*4882a593Smuzhiyun *
183*4882a593Smuzhiyun * Northbridge chip version may be determined by
184*4882a593Smuzhiyun * reading the PCI revision ID (0xC1 or greater is C18D).
185*4882a593Smuzhiyun */
186*4882a593Smuzhiyun pci_read_config_dword(dev, 0x6c, &val);
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun /*
189*4882a593Smuzhiyun * Apply fixup if needed, but don't touch disconnect state
190*4882a593Smuzhiyun */
191*4882a593Smuzhiyun if ((val & 0x00FF0000) != 0x00010000) {
192*4882a593Smuzhiyun dev_warn(&dev->dev, "nForce2 C1 Halt Disconnect fixup\n");
193*4882a593Smuzhiyun pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000);
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
197*4882a593Smuzhiyun DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
198*4882a593Smuzhiyun
/* Max PCI Express root ports */
#define MAX_PCIEROOT 6

/*
 * Saved Link Control register offsets, one slot per (root port, devfn).
 * A zero entry means no offset was recorded for that slot.
 */
static int quirk_aspm_offset[MAX_PCIEROOT << 3];

/* Map a (root-port device ID, devfn) pair to a slot in quirk_aspm_offset[]. */
#define GET_INDEX(a, b) ((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7))

/* Config reads are not filtered; pass straight through to the raw accessor. */
static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			    devfn, where, size, value);
}
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun /*
212*4882a593Smuzhiyun * Replace the original pci bus ops for write with a new one that will filter
213*4882a593Smuzhiyun * the request to insure ASPM cannot be enabled.
214*4882a593Smuzhiyun */
quirk_pcie_aspm_write(struct pci_bus * bus,unsigned int devfn,int where,int size,u32 value)215*4882a593Smuzhiyun static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
216*4882a593Smuzhiyun {
217*4882a593Smuzhiyun u8 offset;
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun if ((offset) && (where == offset))
222*4882a593Smuzhiyun value = value & ~PCI_EXP_LNKCTL_ASPMC;
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun return raw_pci_write(pci_domain_nr(bus), bus->number,
225*4882a593Smuzhiyun devfn, where, size, value);
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun
/* Bus ops installed on root-port buses to filter ASPM enable writes. */
static struct pci_ops quirk_pcie_aspm_ops = {
	.read = quirk_pcie_aspm_read,
	.write = quirk_pcie_aspm_write,
};
232*4882a593Smuzhiyun
/*
 * Prevents PCI Express ASPM (Active State Power Management) being enabled.
 *
 * Save the register offset, where the ASPM control bits are located,
 * for each PCI Express device that is in the device list of
 * the root port in an array for fast indexing. Replace the bus ops
 * with the modified one.
 */
static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
{
	int i;
	struct pci_bus *pbus;
	struct pci_dev *dev;

	/* Nothing to do for a port without a subordinate bus. */
	if ((pbus = pdev->subordinate) == NULL)
		return;

	/*
	 * Check if the DID of pdev matches one of the six root ports. This
	 * check is needed in the case this function is called directly by the
	 * hot-plug driver.
	 */
	if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) ||
	    (pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1))
		return;

	if (list_empty(&pbus->devices)) {
		/*
		 * If no device is attached to the root port at power-up or
		 * after hot-remove, the pbus->devices is empty and this code
		 * will set the offsets to zero and the bus ops to parent's bus
		 * ops, which is unmodified.
		 */
		for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
			quirk_aspm_offset[i] = 0;

		pci_bus_set_ops(pbus, pbus->parent->ops);
	} else {
		/*
		 * If devices are attached to the root port at power-up or
		 * after hot-add, the code loops through the device list of
		 * each root port to save the register offsets and replace the
		 * bus ops.
		 */
		list_for_each_entry(dev, &pbus->devices, bus_list)
			/* There are 0 to 8 devices attached to this bus */
			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
				dev->pcie_cap + PCI_EXP_LNKCTL;

		pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
		dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
	}

}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk);
293*4882a593Smuzhiyun
/*
 * Fixup to mark boot BIOS video selected by BIOS before it changes
 *
 * From information provided by "Jon Smirl" <jonsmirl@gmail.com>
 *
 * The standard boot ROM sequence for an x86 machine uses the BIOS
 * to select an initial video card for boot display. This boot video
 * card will have its BIOS copied to 0xC0000 in system RAM.
 * IORESOURCE_ROM_SHADOW is used to associate the boot video
 * card with this copy. On laptops this copy has to be used since
 * the main ROM may be compressed or combined with another image.
 * See pci_map_rom() for use of this flag. Before marking the device
 * with IORESOURCE_ROM_SHADOW check if a vga_default_device is already set
 * by either arch code or vga-arbitration; if so only apply the fixup to this
 * already-determined primary video card.
 */

static void pci_fixup_video(struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	struct pci_bus *bus;
	u16 config;
	struct resource *res;

	/* Is VGA routed to us? Walk every upstream bridge to the root. */
	bus = pdev->bus;
	while (bus) {
		bridge = bus->self;

		/*
		 * From information provided by
		 * "David Miller" <davem@davemloft.net>
		 * The bridge control register is valid for PCI header
		 * type BRIDGE, or CARDBUS. Host to PCI controllers use
		 * PCI header type NORMAL.
		 */
		if (bridge && (pci_is_bridge(bridge))) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
						&config);
			/* Any bridge not forwarding VGA means we're not primary. */
			if (!(config & PCI_BRIDGE_CTL_VGA))
				return;
		}
		bus = bus->parent;
	}
	/* Apply only to the primary VGA device (or when none is known yet). */
	if (!vga_default_device() || pdev == vga_default_device()) {
		pci_read_config_word(pdev, PCI_COMMAND, &config);
		if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			res = &pdev->resource[PCI_ROM_RESOURCE];

			/* Drop any existing ROM BAR claim before shadowing. */
			pci_disable_rom(pdev);
			if (res->parent)
				release_resource(res);

			/* Point the ROM resource at the 128K legacy shadow. */
			res->start = 0xC0000;
			res->end = res->start + 0x20000 - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
				     IORESOURCE_PCI_FIXED;
			dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n",
				 res);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
				PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
358*4882a593Smuzhiyun
359*4882a593Smuzhiyun
/* DMI match for the MSI K8T Neo2-FIR board (see fixup below). */
static const struct dmi_system_id msi_k8t_dmi_table[] = {
	{
		.ident = "MSI-K8T-Neo2Fir",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
		},
	},
	{}
};
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun /*
372*4882a593Smuzhiyun * The AMD-Athlon64 board MSI "K8T Neo2-FIR" disables the onboard sound
373*4882a593Smuzhiyun * card if a PCI-soundcard is added.
374*4882a593Smuzhiyun *
375*4882a593Smuzhiyun * The BIOS only gives options "DISABLED" and "AUTO". This code sets
376*4882a593Smuzhiyun * the corresponding register-value to enable the soundcard.
377*4882a593Smuzhiyun *
378*4882a593Smuzhiyun * The soundcard is only enabled, if the mainborad is identified
379*4882a593Smuzhiyun * via DMI-tables and the soundcard is detected to be off.
380*4882a593Smuzhiyun */
pci_fixup_msi_k8t_onboard_sound(struct pci_dev * dev)381*4882a593Smuzhiyun static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
382*4882a593Smuzhiyun {
383*4882a593Smuzhiyun unsigned char val;
384*4882a593Smuzhiyun if (!dmi_check_system(msi_k8t_dmi_table))
385*4882a593Smuzhiyun return; /* only applies to MSI K8T Neo2-FIR */
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun pci_read_config_byte(dev, 0x50, &val);
388*4882a593Smuzhiyun if (val & 0x40) {
389*4882a593Smuzhiyun pci_write_config_byte(dev, 0x50, val & (~0x40));
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun /* verify the change for status output */
392*4882a593Smuzhiyun pci_read_config_byte(dev, 0x50, &val);
393*4882a593Smuzhiyun if (val & 0x40)
394*4882a593Smuzhiyun dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
395*4882a593Smuzhiyun "can't enable onboard soundcard!\n");
396*4882a593Smuzhiyun else
397*4882a593Smuzhiyun dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
398*4882a593Smuzhiyun "enabled onboard soundcard\n");
399*4882a593Smuzhiyun }
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
402*4882a593Smuzhiyun pci_fixup_msi_k8t_onboard_sound);
403*4882a593Smuzhiyun DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
404*4882a593Smuzhiyun pci_fixup_msi_k8t_onboard_sound);
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun /*
407*4882a593Smuzhiyun * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
408*4882a593Smuzhiyun *
409*4882a593Smuzhiyun * We pretend to bring them out of full D3 state, and restore the proper
410*4882a593Smuzhiyun * IRQ, PCI cache line size, and BARs, otherwise the device won't function
411*4882a593Smuzhiyun * properly. In some cases, the device will generate an interrupt on
412*4882a593Smuzhiyun * the wrong IRQ line, causing any devices sharing the line it's
413*4882a593Smuzhiyun * *supposed* to use to be disabled by the kernel's IRQ debug code.
414*4882a593Smuzhiyun */
/* Cache line size saved by the pre-fixup and restored by the post-fixup. */
static u16 toshiba_line_size;

/* Toshiba models whose TI OHCI-1394 controller needs the D3 dance below. */
static const struct dmi_system_id toshiba_ohci1394_dmi_table[] = {
	{
		.ident = "Toshiba PS5 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"),
		},
	},
	{
		.ident = "Toshiba PSM4 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
		},
	},
	{
		.ident = "Toshiba A40 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
		},
	},
	{ }
};
441*4882a593Smuzhiyun
/*
 * Pretend the device is coming out of D3cold and remember its cache line
 * size so the ENABLE-stage fixup below can restore config space.
 */
static void pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
	if (!dmi_check_system(toshiba_ohci1394_dmi_table))
		return; /* only applies to certain Toshibas (so far) */

	dev->current_state = PCI_D3cold;
	pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032,
			       pci_pre_fixup_toshiba_ohci1394);
452*4882a593Smuzhiyun
/* Restore cache line size, IRQ line and the first two BARs saved/known
 * for the TI OHCI-1394 on affected Toshiba laptops (see comment above). */
static void pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
	if (!dmi_check_system(toshiba_ohci1394_dmi_table))
		return; /* only applies to certain Toshibas (so far) */

	/* Restore config space on Toshiba laptops */
	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
	/* NOTE(review): the cast stores one byte into the first byte of
	 * dev->irq — relies on x86 little-endian layout and dev->irq having
	 * been previously initialized; confirm if ever made generic. */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
				pci_resource_start(dev, 0));
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
				pci_resource_start(dev, 1));
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
			       pci_post_fixup_toshiba_ohci1394);
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun
470*4882a593Smuzhiyun /*
471*4882a593Smuzhiyun * Prevent the BIOS trapping accesses to the Cyrix CS5530A video device
472*4882a593Smuzhiyun * configuration space.
473*4882a593Smuzhiyun */
pci_early_fixup_cyrix_5530(struct pci_dev * dev)474*4882a593Smuzhiyun static void pci_early_fixup_cyrix_5530(struct pci_dev *dev)
475*4882a593Smuzhiyun {
476*4882a593Smuzhiyun u8 r;
477*4882a593Smuzhiyun /* clear 'F4 Video Configuration Trap' bit */
478*4882a593Smuzhiyun pci_read_config_byte(dev, 0x42, &r);
479*4882a593Smuzhiyun r &= 0xfd;
480*4882a593Smuzhiyun pci_write_config_byte(dev, 0x42, r);
481*4882a593Smuzhiyun }
482*4882a593Smuzhiyun DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
483*4882a593Smuzhiyun pci_early_fixup_cyrix_5530);
484*4882a593Smuzhiyun DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
485*4882a593Smuzhiyun pci_early_fixup_cyrix_5530);
486*4882a593Smuzhiyun
/*
 * Siemens Nixdorf AG FSC Multiprocessor Interrupt Controller:
 * prevent update of the BAR0, which doesn't look like a normal BAR.
 */
static void pci_siemens_interrupt_controller(struct pci_dev *dev)
{
	/* Marking the resource fixed keeps resource assignment from moving it. */
	dev->resource[0].flags |= IORESOURCE_PCI_FIXED;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
			  pci_siemens_interrupt_controller);
497*4882a593Smuzhiyun
/*
 * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from
 * confusing the PCI engine:
 */
static void sb600_disable_hpet_bar(struct pci_dev *dev)
{
	u8 val;

	/*
	 * The SB600 and SB700 both share the same device
	 * ID, but the PM register 0x55 does something different
	 * for the SB700, so make sure we are dealing with the
	 * SB600 before touching the bit:
	 */

	/* Config offset 0x08 is the revision ID; SB600 revisions are < 0x2F. */
	pci_read_config_byte(dev, 0x08, &val);

	if (val < 0x2F) {
		/* Indexed PM register access: index port 0xCD6, data port 0xCD7. */
		outb(0x55, 0xCD6);
		val = inb(0xCD7);

		/* Set bit 7 in PM register 0x55 */
		outb(0x55, 0xCD6);
		outb(val | 0x80, 0xCD7);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
525*4882a593Smuzhiyun
#ifdef CONFIG_HPET_TIMER
/* Pin BAR1 in place when it maps the platform HPET address. */
static void sb600_hpet_quirk(struct pci_dev *dev)
{
	struct resource *bar1 = &dev->resource[1];

	if (!(bar1->flags & IORESOURCE_MEM) || bar1->start != hpet_address)
		return;

	bar1->flags |= IORESOURCE_PCI_FIXED;
	dev_info(&dev->dev, "reg 0x14 contains HPET; making it immovable\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, 0x4385, sb600_hpet_quirk);
#endif
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun /*
540*4882a593Smuzhiyun * Twinhead H12Y needs us to block out a region otherwise we map devices
541*4882a593Smuzhiyun * there and any access kills the box.
542*4882a593Smuzhiyun *
543*4882a593Smuzhiyun * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
544*4882a593Smuzhiyun *
545*4882a593Smuzhiyun * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
546*4882a593Smuzhiyun */
twinhead_reserve_killing_zone(struct pci_dev * dev)547*4882a593Smuzhiyun static void twinhead_reserve_killing_zone(struct pci_dev *dev)
548*4882a593Smuzhiyun {
549*4882a593Smuzhiyun if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
550*4882a593Smuzhiyun pr_info("Reserving memory on Twinhead H12Y\n");
551*4882a593Smuzhiyun request_mem_region(0xFFB00000, 0x100000, "twinhead");
552*4882a593Smuzhiyun }
553*4882a593Smuzhiyun }
554*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
555*4882a593Smuzhiyun
/*
 * Device [8086:2fc0]
 * Erratum HSE43
 * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
 * https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
 *
 * Devices [8086:6f60,6fa0,6fc0]
 * Erratum BDF2
 * PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
 * https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
 */
static void pci_invalid_bar(struct pci_dev *dev)
{
	/* Flag the device so the core skips sizing/assigning its bogus BARs. */
	dev->non_compliant_bars = 1;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
579*4882a593Smuzhiyun
580*4882a593Smuzhiyun /*
581*4882a593Smuzhiyun * Device [1022:7808]
582*4882a593Smuzhiyun * 23. USB Wake on Connect/Disconnect with Low Speed Devices
583*4882a593Smuzhiyun * https://support.amd.com/TechDocs/46837.pdf
584*4882a593Smuzhiyun * Appendix A2
585*4882a593Smuzhiyun * https://support.amd.com/TechDocs/42413.pdf
586*4882a593Smuzhiyun */
pci_fixup_amd_ehci_pme(struct pci_dev * dev)587*4882a593Smuzhiyun static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
588*4882a593Smuzhiyun {
589*4882a593Smuzhiyun dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
590*4882a593Smuzhiyun dev->pme_support &= ~((PCI_PM_CAP_PME_D3hot | PCI_PM_CAP_PME_D3cold)
591*4882a593Smuzhiyun >> PCI_PM_CAP_PME_SHIFT);
592*4882a593Smuzhiyun }
593*4882a593Smuzhiyun DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
594*4882a593Smuzhiyun
595*4882a593Smuzhiyun /*
596*4882a593Smuzhiyun * Device [1022:7914]
597*4882a593Smuzhiyun * When in D0, PME# doesn't get asserted when plugging USB 2.0 device.
598*4882a593Smuzhiyun */
pci_fixup_amd_fch_xhci_pme(struct pci_dev * dev)599*4882a593Smuzhiyun static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
600*4882a593Smuzhiyun {
601*4882a593Smuzhiyun dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
602*4882a593Smuzhiyun dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
603*4882a593Smuzhiyun }
604*4882a593Smuzhiyun DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
605*4882a593Smuzhiyun
606*4882a593Smuzhiyun /*
607*4882a593Smuzhiyun * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
608*4882a593Smuzhiyun *
609*4882a593Smuzhiyun * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
610*4882a593Smuzhiyun * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
611*4882a593Smuzhiyun * for soft poweroff and suspend-to-RAM.
612*4882a593Smuzhiyun *
613*4882a593Smuzhiyun * As far as we know, this is related to the address space, not to the Root
614*4882a593Smuzhiyun * Port itself. Attaching the quirk to the Root Port is a convenience, but
615*4882a593Smuzhiyun * it could probably also be a standalone DMI quirk.
616*4882a593Smuzhiyun *
617*4882a593Smuzhiyun * https://bugzilla.kernel.org/show_bug.cgi?id=103211
618*4882a593Smuzhiyun */
quirk_apple_mbp_poweroff(struct pci_dev * pdev)619*4882a593Smuzhiyun static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
620*4882a593Smuzhiyun {
621*4882a593Smuzhiyun struct device *dev = &pdev->dev;
622*4882a593Smuzhiyun struct resource *res;
623*4882a593Smuzhiyun
624*4882a593Smuzhiyun if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
625*4882a593Smuzhiyun !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
626*4882a593Smuzhiyun pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
627*4882a593Smuzhiyun return;
628*4882a593Smuzhiyun
629*4882a593Smuzhiyun res = request_mem_region(0x7fa00000, 0x200000,
630*4882a593Smuzhiyun "MacBook Pro poweroff workaround");
631*4882a593Smuzhiyun if (res)
632*4882a593Smuzhiyun dev_info(dev, "claimed %s %pR\n", res->name, res);
633*4882a593Smuzhiyun else
634*4882a593Smuzhiyun dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
635*4882a593Smuzhiyun }
636*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
637*4882a593Smuzhiyun
638*4882a593Smuzhiyun /*
639*4882a593Smuzhiyun * VMD-enabled root ports will change the source ID for all messages
640*4882a593Smuzhiyun * to the VMD device. Rather than doing device matching with the source
641*4882a593Smuzhiyun * ID, the AER driver should traverse the child device tree, reading
642*4882a593Smuzhiyun * AER registers to find the faulting device.
643*4882a593Smuzhiyun */
quirk_no_aersid(struct pci_dev * pdev)644*4882a593Smuzhiyun static void quirk_no_aersid(struct pci_dev *pdev)
645*4882a593Smuzhiyun {
646*4882a593Smuzhiyun /* VMD Domain */
647*4882a593Smuzhiyun if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
648*4882a593Smuzhiyun pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
649*4882a593Smuzhiyun }
650*4882a593Smuzhiyun DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
651*4882a593Smuzhiyun PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
652*4882a593Smuzhiyun
quirk_intel_th_dnv(struct pci_dev * dev)653*4882a593Smuzhiyun static void quirk_intel_th_dnv(struct pci_dev *dev)
654*4882a593Smuzhiyun {
655*4882a593Smuzhiyun struct resource *r = &dev->resource[4];
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun /*
658*4882a593Smuzhiyun * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
659*4882a593Smuzhiyun * appears to be 4 MB in reality.
660*4882a593Smuzhiyun */
661*4882a593Smuzhiyun if (r->end == r->start + 0x7ff) {
662*4882a593Smuzhiyun r->start = 0;
663*4882a593Smuzhiyun r->end = 0x3fffff;
664*4882a593Smuzhiyun r->flags |= IORESOURCE_UNSET;
665*4882a593Smuzhiyun }
666*4882a593Smuzhiyun }
667*4882a593Smuzhiyun DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun #ifdef CONFIG_PHYS_ADDR_T_64BIT
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
672*4882a593Smuzhiyun #define AMD_141b_MMIO_BASE_RE_MASK BIT(0)
673*4882a593Smuzhiyun #define AMD_141b_MMIO_BASE_WE_MASK BIT(1)
674*4882a593Smuzhiyun #define AMD_141b_MMIO_BASE_MMIOBASE_MASK GENMASK(31,8)
675*4882a593Smuzhiyun
676*4882a593Smuzhiyun #define AMD_141b_MMIO_LIMIT(x) (0x84 + (x) * 0x8)
677*4882a593Smuzhiyun #define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK GENMASK(31,8)
678*4882a593Smuzhiyun
679*4882a593Smuzhiyun #define AMD_141b_MMIO_HIGH(x) (0x180 + (x) * 0x4)
680*4882a593Smuzhiyun #define AMD_141b_MMIO_HIGH_MMIOBASE_MASK GENMASK(7,0)
681*4882a593Smuzhiyun #define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT 16
682*4882a593Smuzhiyun #define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK GENMASK(23,16)
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun /*
685*4882a593Smuzhiyun * The PCI Firmware Spec, rev 3.2, notes that ACPI should optionally allow
686*4882a593Smuzhiyun * configuring host bridge windows using the _PRS and _SRS methods.
687*4882a593Smuzhiyun *
688*4882a593Smuzhiyun * But this is rarely implemented, so we manually enable a large 64bit BAR for
689*4882a593Smuzhiyun * PCIe device on AMD Family 15h (Models 00h-1fh, 30h-3fh, 60h-7fh) Processors
690*4882a593Smuzhiyun * here.
691*4882a593Smuzhiyun */
static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
{
	static const char *name = "PCI Bus 0000:00";
	struct resource *res, *conflict;
	u32 base, limit, high;
	struct pci_dev *other;
	unsigned i;

	/* Opt-in only: requires pci=big_root_window on the command line. */
	if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
		return;

	/* Check that we are the only device of that type */
	other = pci_get_device(dev->vendor, dev->device, NULL);
	if (other != dev ||
	    (other = pci_get_device(dev->vendor, dev->device, other))) {
		/* This is a multi-socket system, don't touch it for now */
		pci_dev_put(other);
		return;
	}

	/*
	 * Scan the 8 D18F1 MMIO base/limit register pairs for a free slot,
	 * bailing out if any slot already maps a window above 4GB (base is
	 * compared in 16-byte units after reassembling the 40-bit address).
	 */
	for (i = 0; i < 8; i++) {
		pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
		pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);

		/* Is this slot free? */
		if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
			      AMD_141b_MMIO_BASE_WE_MASK)))
			break;

		base >>= 8;
		base |= high << 24;

		/* Abort if a slot already configures a 64bit BAR. */
		if (base > 0x10000)
			return;
	}
	/* All 8 slots in use: nowhere to program the new window. */
	if (i == 8)
		return;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return;

	/*
	 * Allocate a 256GB window directly below the 0xfd00000000 hardware
	 * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
	 */
	res->name = name;
	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
		IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
	res->start = 0xbd00000000ull;
	res->end = 0xfd00000000ull - 1;

	conflict = request_resource_conflict(&iomem_resource, res);
	if (conflict) {
		kfree(res);
		/* A conflict with any other owner means we can't help. */
		if (conflict->name != name)
			return;

		/* We are resuming from suspend; just reenable the window */
		res = conflict;
	} else {
		dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
			 res);
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		pci_bus_add_resource(dev->bus, res, 0);
	}

	/*
	 * Encode the window into the register layout: bits [39:16] of the
	 * address go in BASE/LIMIT, bits [47:40] in the HIGH register.
	 */
	base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
		AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
	limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
	high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
		((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
		 & AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);

	/*
	 * Write HIGH and LIMIT first; BASE last, since it carries the RE/WE
	 * enable bits and activates the window.
	 */
	pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
	pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
	pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun #define RS690_LOWER_TOP_OF_DRAM2 0x30
783*4882a593Smuzhiyun #define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
784*4882a593Smuzhiyun #define RS690_UPPER_TOP_OF_DRAM2 0x31
785*4882a593Smuzhiyun #define RS690_HTIU_NB_INDEX 0xA8
786*4882a593Smuzhiyun #define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
787*4882a593Smuzhiyun #define RS690_HTIU_NB_DATA 0xAC
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun /*
790*4882a593Smuzhiyun * Some BIOS implementations support RAM above 4GB, but do not configure the
791*4882a593Smuzhiyun * PCI host to respond to bus master accesses for these addresses. These
792*4882a593Smuzhiyun * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
793*4882a593Smuzhiyun * works as expected for addresses below 4GB.
794*4882a593Smuzhiyun *
795*4882a593Smuzhiyun * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
796*4882a593Smuzhiyun * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
797*4882a593Smuzhiyun */
static void rs690_fix_64bit_dma(struct pci_dev *pdev)
{
	u32 val = 0;
	phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;

	/* Nothing to do unless RAM extends above 4GB. */
	if (top_of_dram <= (1ULL << 32))
		return;

	/*
	 * TOP_OF_DRAM2 is reached indirectly: write the register index to
	 * HTIU_NB_INDEX, then read/write the value through HTIU_NB_DATA.
	 */
	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
				RS690_LOWER_TOP_OF_DRAM2);
	pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);

	/* If the BIOS already programmed TOP_OF_DRAM2, leave it alone. */
	if (val)
		return;

	pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);

	/*
	 * Program the upper half first, then the lower half with the VALID
	 * bit, so the register never holds a valid-but-partial address.
	 * WR_ENABLE in the index selects write access to the data port.
	 */
	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
		RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
	pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);

	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
		RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
	pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
		top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
825*4882a593Smuzhiyun
826*4882a593Smuzhiyun #endif
827