// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains code to reset and initialize USB host controllers.
 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
 * It may need to run early during booting -- before USB would normally
 * initialize -- to ensure that Linux doesn't use any legacy modes.
 *
 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
 *  (and others)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/of.h>
#include <linux/iopoll.h>

#include "pci-quirks.h"
#include "xhci-ext-caps.h"


#define UHCI_USBLEGSUP		0xc0		/* legacy support */
#define UHCI_USBCMD		0		/* command register */
#define UHCI_USBINTR		4		/* interrupt register */
#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */

#define OHCI_CONTROL		0x04
#define OHCI_CMDSTATUS		0x08
#define OHCI_INTRSTATUS		0x0c
#define OHCI_INTRENABLE		0x10
#define OHCI_INTRDISABLE	0x14
#define OHCI_FMINTERVAL		0x34
#define OHCI_HCFS		(3 << 6)	/* hc functional state */
#define OHCI_HCR		(1 << 0)	/* host controller reset */
#define OHCI_OCR		(1 << 3)	/* ownership change request */
#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
#define OHCI_INTR_OC		(1 << 30)	/* ownership change */

#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
#define EHCI_USBCMD		0		/* command register */
#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
#define EHCI_USBSTS		4		/* status register */
#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
#define EHCI_USBINTR		8		/* interrupt register */
#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
#define EHCI_USBLEGSUP		0		/* legacy support register */
#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */

/* AMD quirk use */
#define	AB_REG_BAR_LOW		0xe0
#define	AB_REG_BAR_HIGH		0xe1
#define	AB_REG_BAR_SB700	0xf0
#define	AB_INDX(addr)		((addr) + 0x00)
#define	AB_DATA(addr)		((addr) + 0x04)
#define	AX_INDXC		0x30
#define	AX_DATAC		0x34

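/*
 * Promontory (PT) indexed registers: an address is written to PT_ADDR_INDX
 * and the byte at that address is then read back through PT_READ_INDX
 * (see usb_amd_pt_check_port() below).
 */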
#define PT_ADDR_INDX		0xE8
#define PT_READ_INDX		0xE4
#define PT_SIG_1_ADDR		0xA520
#define PT_SIG_2_ADDR		0xA521
#define PT_SIG_3_ADDR		0xA522
#define PT_SIG_4_ADDR		0xA523
#define PT_SIG_1_DATA		0x78
#define PT_SIG_2_DATA		0x56
#define PT_SIG_3_DATA		0x34
#define PT_SIG_4_DATA		0x12
#define PT4_P1_REG		0xB521
#define PT4_P2_REG		0xB522
#define PT2_P1_REG		0xD520
#define PT2_P2_REG		0xD521
#define PT1_P1_REG		0xD522
#define PT1_P2_REG		0xD523

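/*
 * The northbridge PCIe registers below are reached through an index/data
 * window: the register offset is written to NB_PCIE_INDX_ADDR and the data
 * is then read or written through NB_PCIE_INDX_DATA (see usb_amd_quirk_pll()).
 */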
#define	NB_PCIE_INDX_ADDR	0xe0
#define	NB_PCIE_INDX_DATA	0xe4
#define	PCIE_P_CNTL		0x10040
#define	BIF_NB			0x10002
#define	NB_PIF0_PWRDOWN_0	0x01100012
#define	NB_PIF0_PWRDOWN_1	0x01100013

#define USB_INTEL_XUSB2PR      0xD0
#define USB_INTEL_USB2PRM      0xD4
#define USB_INTEL_USB3_PSSEN   0xD8
#define USB_INTEL_USB3PRM      0xDC

/* ASMEDIA quirk use */
#define ASMT_DATA_WRITE0_REG	0xF8
#define ASMT_DATA_WRITE1_REG	0xFC
#define ASMT_CONTROL_REG	0xE0
#define ASMT_CONTROL_WRITE_BIT	0x02
#define ASMT_WRITEREG_CMD	0x10423
#define ASMT_FLOWCTL_ADDR	0xFA30
#define ASMT_FLOWCTL_DATA	0xBA
#define ASMT_PSEUDO_DATA	0

/*
 * amd_chipset_gen values represent the different AMD chipset generations
 */
enum amd_chipset_gen {
	NOT_AMD_CHIPSET = 0,
	AMD_CHIPSET_SB600,
	AMD_CHIPSET_SB700,
	AMD_CHIPSET_SB800,
	AMD_CHIPSET_HUDSON2,
	AMD_CHIPSET_BOLTON,
	AMD_CHIPSET_YANGTZE,
	AMD_CHIPSET_TAISHAN,
	AMD_CHIPSET_UNKNOWN,
};

struct amd_chipset_type {
	enum amd_chipset_gen gen;
	u8 rev;
};

static struct amd_chipset_info {
	struct pci_dev	*nb_dev;
	struct pci_dev	*smbus_dev;
	int nb_type;
	struct amd_chipset_type sb_type;
	int isoc_reqs;
	int probe_count;
	bool need_pll_quirk;
} amd_chipset;

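/* amd_lock protects the shared amd_chipset state above. */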
static DEFINE_SPINLOCK(amd_lock);

/*
 * amd_chipset_sb_type_init - initialize amd chipset southbridge type
 *
 * AMD FCH/SB generation and revision are identified by SMBus controller
 * vendor, device and revision IDs.
 *
 * Returns: 1 if it is an AMD chipset, 0 otherwise.
 */
static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
{
	u8 rev = 0;
	pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN;

	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
			PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
	if (pinfo->smbus_dev) {
		rev = pinfo->smbus_dev->revision;
		if (rev >= 0x10 && rev <= 0x1f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB600;
		else if (rev >= 0x30 && rev <= 0x3f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
		else if (rev >= 0x40 && rev <= 0x4f)
			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
	} else {
		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);

		if (pinfo->smbus_dev) {
			rev = pinfo->smbus_dev->revision;
			if (rev >= 0x11 && rev <= 0x14)
				pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
			else if (rev >= 0x15 && rev <= 0x18)
				pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
			else if (rev >= 0x39 && rev <= 0x3a)
				pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
		} else {
			pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
							  0x145c, NULL);
			if (pinfo->smbus_dev) {
				rev = pinfo->smbus_dev->revision;
				pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
			} else {
				pinfo->sb_type.gen = NOT_AMD_CHIPSET;
				return 0;
			}
		}
	}
	pinfo->sb_type.rev = rev;
	return 1;
}

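/*
 * Toggle the prefetch-enable bits (bits 9:8 of PCI config offset 0x50) on
 * SB800-family controllers: cleared when @on is 0, set otherwise.
 */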
void sb800_prefetch(struct device *dev, int on)
{
	u16 misc;
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_read_config_word(pdev, 0x50, &misc);
	if (on == 0)
		pci_write_config_word(pdev, 0x50, misc & 0xfcff);
	else
		pci_write_config_word(pdev, 0x50, misc | 0x0300);
}
EXPORT_SYMBOL_GPL(sb800_prefetch);

static void usb_amd_find_chipset_info(void)
{
	unsigned long flags;
	struct amd_chipset_info info;
	info.need_pll_quirk = false;

	spin_lock_irqsave(&amd_lock, flags);

	/* probe only once */
	if (amd_chipset.probe_count > 0) {
		amd_chipset.probe_count++;
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}
	memset(&info, 0, sizeof(info));
	spin_unlock_irqrestore(&amd_lock, flags);

	if (!amd_chipset_sb_type_init(&info)) {
		goto commit;
	}

	switch (info.sb_type.gen) {
	case AMD_CHIPSET_SB700:
		info.need_pll_quirk = info.sb_type.rev <= 0x3B;
		break;
	case AMD_CHIPSET_SB800:
	case AMD_CHIPSET_HUDSON2:
	case AMD_CHIPSET_BOLTON:
		info.need_pll_quirk = true;
		break;
	default:
		info.need_pll_quirk = false;
		break;
	}

	if (!info.need_pll_quirk) {
		if (info.smbus_dev) {
			pci_dev_put(info.smbus_dev);
			info.smbus_dev = NULL;
		}
		goto commit;
	}

	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
	if (info.nb_dev) {
		info.nb_type = 1;
	} else {
		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
		if (info.nb_dev) {
			info.nb_type = 2;
		} else {
			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
						     0x9600, NULL);
			if (info.nb_dev)
				info.nb_type = 3;
		}
	}

	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");

commit:

	spin_lock_irqsave(&amd_lock, flags);
	if (amd_chipset.probe_count > 0) {
		/* race - someone else was faster - drop devices */

		/* Mark that we were here */
		amd_chipset.probe_count++;

		spin_unlock_irqrestore(&amd_lock, flags);

		pci_dev_put(info.nb_dev);
		pci_dev_put(info.smbus_dev);

	} else {
		/* no race - commit the result */
		info.probe_count++;
		amd_chipset = info;
		spin_unlock_irqrestore(&amd_lock, flags);
	}
}

int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
	/* Make sure amd chipset type has already been initialized */
	usb_amd_find_chipset_info();
	if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
	    amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
		dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);

bool usb_amd_hang_symptom_quirk(void)
{
	u8 rev;

	usb_amd_find_chipset_info();
	rev = amd_chipset.sb_type.rev;
	/* SB600 and old versions of SB700 have the hang symptom bug */
	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 ||
			(amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
			 rev >= 0x3a && rev <= 0x3b);
}
EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk);

bool usb_amd_prefetch_quirk(void)
{
	usb_amd_find_chipset_info();
	/* SB800 needs pre-fetch fix */
	return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800;
}
EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);

bool usb_amd_quirk_pll_check(void)
{
	usb_amd_find_chipset_info();
	return amd_chipset.need_pll_quirk;
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check);

/*
 * The hardware normally enables the A-link power management feature, which
 * lets the system lower the power consumption in idle states.
 *
 * This USB quirk prevents the link from going into that lower power state
 * during isochronous transfers.
 *
 * Without this quirk, isochronous streams on OHCI/EHCI/xHCI controllers of
 * some AMD platforms may occasionally stutter or break.
 */
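/*
 * The quirk is reference counted through amd_chipset.isoc_reqs: the PLL is
 * reconfigured when the first disable request arrives and restored when the
 * last one goes away.
 */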
static void usb_amd_quirk_pll(int disable)
{
	u32 addr, addr_low, addr_high, val;
	u32 bit = disable ? 0 : 1;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	if (disable) {
		amd_chipset.isoc_reqs++;
		if (amd_chipset.isoc_reqs > 1) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	} else {
		amd_chipset.isoc_reqs--;
		if (amd_chipset.isoc_reqs > 0) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	}

	if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 ||
			amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 ||
			amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) {
		outb_p(AB_REG_BAR_LOW, 0xcd6);
		addr_low = inb_p(0xcd7);
		outb_p(AB_REG_BAR_HIGH, 0xcd6);
		addr_high = inb_p(0xcd7);
		addr = addr_high << 8 | addr_low;

		outl_p(0x30, AB_INDX(addr));
		outl_p(0x40, AB_DATA(addr));
		outl_p(0x34, AB_INDX(addr));
		val = inl_p(AB_DATA(addr));
	} else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 &&
			amd_chipset.sb_type.rev <= 0x3b) {
		pci_read_config_dword(amd_chipset.smbus_dev,
					AB_REG_BAR_SB700, &addr);
		outl(AX_INDXC, AB_INDX(addr));
		outl(0x40, AB_DATA(addr));
		outl(AX_DATAC, AB_INDX(addr));
		val = inl(AB_DATA(addr));
	} else {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (disable) {
		val &= ~0x08;
		val |= (1 << 4) | (1 << 9);
	} else {
		val |= 0x08;
		val &= ~((1 << 4) | (1 << 9));
	}
	outl_p(val, AB_DATA(addr));

	if (!amd_chipset.nb_dev) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
		addr = PCIE_P_CNTL;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);

		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
		val |= bit | (bit << 3) | (bit << 12);
		val |= ((!bit) << 4) | ((!bit) << 9);
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = BIF_NB;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		val &= ~(1 << 8);
		val |= bit << 8;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	} else if (amd_chipset.nb_type == 2) {
		addr = NB_PIF0_PWRDOWN_0;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = NB_PIF0_PWRDOWN_1;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	}

	spin_unlock_irqrestore(&amd_lock, flags);
	return;
}

void usb_amd_quirk_pll_disable(void)
{
	usb_amd_quirk_pll(1);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);

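/*
 * ASMedia register writes go through PCI config space: the command and
 * target address are written to ASMT_DATA_WRITE0/1_REG, the write is kicked
 * off by setting ASMT_CONTROL_WRITE_BIT, and usb_asmedia_wait_write() polls
 * until the controller clears that bit again.
 */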
static int usb_asmedia_wait_write(struct pci_dev *pdev)
{
	unsigned long retry_count;
	unsigned char value;

	for (retry_count = 1000; retry_count > 0; --retry_count) {

		pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);

		if (value == 0xff) {
			dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
			return -EIO;
		}

		if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
			return 0;

		udelay(50);
	}

	dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
	return -ETIMEDOUT;
}

void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
{
	if (usb_asmedia_wait_write(pdev) != 0)
		return;

	/* send command and address to device */
	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);

	if (usb_asmedia_wait_write(pdev) != 0)
		return;

	/* send data to device */
	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
}
EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);

void usb_amd_quirk_pll_enable(void)
{
	usb_amd_quirk_pll(0);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);

void usb_amd_dev_put(void)
{
	struct pci_dev *nb, *smbus;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	amd_chipset.probe_count--;
	if (amd_chipset.probe_count > 0) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	/* save them to pci_dev_put outside of spinlock */
	nb    = amd_chipset.nb_dev;
	smbus = amd_chipset.smbus_dev;

	amd_chipset.nb_dev = NULL;
	amd_chipset.smbus_dev = NULL;
	amd_chipset.nb_type = 0;
	memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
	amd_chipset.isoc_reqs = 0;
	amd_chipset.need_pll_quirk = false;

	spin_unlock_irqrestore(&amd_lock, flags);

	pci_dev_put(nb);
	pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);

/*
 * Check if port is disabled in BIOS on AMD Promontory host.
 * BIOS-disabled ports may wake on connect/disconnect and need a
 * driver workaround to keep them disabled.
 * Returns true if port is marked disabled.
 */
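/*
 * The four PT_SIG_* reads below sanity-check the indexed register interface;
 * on any mismatch the port is reported as not disabled.
 */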
bool usb_amd_pt_check_port(struct device *device, int port)
{
	unsigned char value, port_shift;
	struct pci_dev *pdev;
	u16 reg;

	pdev = to_pci_dev(device);
	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);

	pci_read_config_byte(pdev, PT_READ_INDX, &value);
	if (value != PT_SIG_1_DATA)
		return false;

	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);

	pci_read_config_byte(pdev, PT_READ_INDX, &value);
	if (value != PT_SIG_2_DATA)
		return false;

	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);

	pci_read_config_byte(pdev, PT_READ_INDX, &value);
	if (value != PT_SIG_3_DATA)
		return false;

	pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);

	pci_read_config_byte(pdev, PT_READ_INDX, &value);
	if (value != PT_SIG_4_DATA)
		return false;

	/* Check the disabled-port setting; if the bit is set, the port is enabled */
	switch (pdev->device) {
	case 0x43b9:
	case 0x43ba:
	/*
	 * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
	 * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0
	 * PT4_P2_REG bits[6..0] represents ports 13 to 7
	 */
		if (port > 6) {
			reg = PT4_P2_REG;
			port_shift = port - 7;
		} else {
			reg = PT4_P1_REG;
			port_shift = port + 1;
		}
		break;
	case 0x43bb:
	/*
	 * device is AMD_PROMONTORYA_2(0x43bb)
	 * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0
	 * PT2_P2_REG bits[5..0] represents ports 9 to 3
	 */
		if (port > 2) {
			reg = PT2_P2_REG;
			port_shift = port - 3;
		} else {
			reg = PT2_P1_REG;
			port_shift = port + 5;
		}
		break;
	case 0x43bc:
	/*
	 * device is AMD_PROMONTORYA_1(0x43bc)
	 * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0
	 * PT1_P2_REG[5..0] represents ports 9 to 4
	 */
		if (port > 3) {
			reg = PT1_P2_REG;
			port_shift = port - 4;
		} else {
			reg = PT1_P1_REG;
			port_shift = port + 4;
		}
		break;
	default:
		return false;
	}
	pci_write_config_word(pdev, PT_ADDR_INDX, reg);
	pci_read_config_byte(pdev, PT_READ_INDX, &value);

	return !(value & BIT(port_shift));
}
EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);

/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 */
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
	 */
	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);

	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
	mb();
	udelay(5);
	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	outw(0, base + UHCI_USBINTR);
	outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	u16 legsup;
	unsigned int cmd, intr;

	/*
	 * When restarting a suspended controller, we expect all the
	 * settings to be the same as we left them:
	 *
	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
	 *	Controller is stopped and configured with EGSM set;
	 *	No interrupts enabled except possibly Resume Detect.
	 *
	 * If any of these conditions are violated we do a complete reset.
	 */
	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
				__func__, legsup);
		goto reset_needed;
	}

	cmd = inw(base + UHCI_USBCMD);
	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
			!(cmd & UHCI_USBCMD_EGSM)) {
		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
				__func__, cmd);
		goto reset_needed;
	}

	intr = inw(base + UHCI_USBINTR);
	if (intr & (~UHCI_USBINTR_RESUME)) {
		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
				__func__, intr);
		goto reset_needed;
	}
	return 0;

reset_needed:
	dev_dbg(&pdev->dev, "Performing full reset\n");
	uhci_reset_hc(pdev, base);
	return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);

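/* Helpers: check whether the device currently decodes PIO or MMIO accesses. */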
static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
	u16 cmd;
	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
}

#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)

static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
	unsigned long base = 0;
	int i;

	if (!pio_enabled(pdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++)
		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
			base = pci_resource_start(pdev, i);
			break;
		}

	if (base)
		uhci_check_and_reset_hc(pdev, base);
}

static int mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}

static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
	void __iomem *base;
	u32 control;
	u32 fminterval = 0;
	bool no_fminterval = false;
	int cnt;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	/*
	 * ULi M5237 OHCI controller locks the whole system when accessing
	 * the OHCI_FMINTERVAL offset.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
		no_fminterval = true;

	control = readl(base + OHCI_CONTROL);

/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifdef __hppa__
#define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
#else
#define	OHCI_CTRL_MASK		OHCI_CTRL_RWC

	if (control & OHCI_CTRL_IR) {
		int wait_time = 500; /* arbitrary; 5 seconds */
		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
		while (wait_time > 0 &&
				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
			wait_time -= 10;
			msleep(10);
		}
		if (wait_time <= 0)
			dev_warn(&pdev->dev,
				 "OHCI: BIOS handoff failed (BIOS bug?) %08x\n",
				 readl(base + OHCI_CONTROL));
	}
#endif

	/* disable interrupts */
	writel((u32) ~0, base + OHCI_INTRDISABLE);

	/* Go into the USB_RESET state, preserving RWC (and possibly IR) */
	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
	readl(base + OHCI_CONTROL);

	/* software reset of the controller, preserving HcFmInterval */
	if (!no_fminterval)
		fminterval = readl(base + OHCI_FMINTERVAL);

	writel(OHCI_HCR, base + OHCI_CMDSTATUS);

	/* reset requires max 10 us delay */
	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
			break;
		udelay(1);
	}

	if (!no_fminterval)
		writel(fminterval, base + OHCI_FMINTERVAL);

	/* Now the controller is safely in SUSPEND and nothing can wake it up */
	iounmap(base);
}

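/* Systems on which the EHCI BIOS handoff must be skipped (see ehci_bios_handoff()). */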
static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
	{
		/*  Pegatron Lucid (ExoPC) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
		},
	},
	{
		/*  Pegatron Lucid (Ordissimo AIRIS) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
		},
	},
	{
		/*  Pegatron Lucid (Ordissimo) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
		},
	},
	{
		/* HASEE E200 */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
			DMI_MATCH(DMI_BOARD_NAME, "E210"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
		},
	},
	{ }
};

static void ehci_bios_handoff(struct pci_dev *pdev,
					void __iomem *op_reg_base,
					u32 cap, u8 offset)
{
	int try_handoff = 1, tried_handoff = 0;

	/*
	 * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
	 * the handoff on its unused controller.  Skip it.
	 *
	 * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
	 */
	if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
			pdev->device == 0x27cc)) {
		if (dmi_check_system(ehci_dmi_nohandoff_table))
			try_handoff = 0;
	}

	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");

#if 0
/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 * but that seems dubious in general (the BIOS left it off intentionally)
 * and is known to prevent some systems from booting.  so we won't do this
 * unless maybe we can determine when we're on a system that needs SMI forced.
 */
		/* BIOS workaround (?): be sure the pre-Linux code
		 * receives the SMI
		 */
		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
				       val | EHCI_USBLEGCTLSTS_SOOE);
#endif

		/* some systems get upset if this semaphore is
		 * set for any other reason than forcing a BIOS
		 * handoff..
		 */
		pci_write_config_byte(pdev, offset + 3, 1);
	}

	/* if boot firmware now owns EHCI, spin till it hands it over. */
	if (try_handoff) {
		int msec = 1000;
		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
			tried_handoff = 1;
			msleep(10);
			msec -= 10;
			pci_read_config_dword(pdev, offset, &cap);
		}
	}

	if (cap & EHCI_USBLEGSUP_BIOS) {
		/* well, possibly buggy BIOS... try to shut it down,
		 * and hope nothing goes too wrong
		 */
		if (try_handoff)
			dev_warn(&pdev->dev,
				 "EHCI: BIOS handoff failed (BIOS bug?) %08x\n",
				 cap);
		pci_write_config_byte(pdev, offset + 2, 0);
	}

	/* just in case, always disable EHCI SMIs */
	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);

	/* If the BIOS ever owned the controller then we can't expect
	 * any power sessions to remain intact.
	 */
	if (tried_handoff)
		writel(0, op_reg_base + EHCI_CONFIGFLAG);
}

static void quirk_usb_disable_ehci(struct pci_dev *pdev)
{
	void __iomem *base, *op_reg_base;
	u32	hcc_params, cap, val;
	u8	offset, cap_length;
	int	wait_time, count = 256/4;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	cap_length = readb(base);
	op_reg_base = base + cap_length;

	/* EHCI 0.96 and later may have "extended capabilities";
	 * spec section 5.1 explains the BIOS handoff, e.g. for
	 * booting from a USB disk or using a USB keyboard
	 */
	hcc_params = readl(base + EHCI_HCC_PARAMS);
	offset = (hcc_params >> 8) & 0xff;
	while (offset && --count) {
		pci_read_config_dword(pdev, offset, &cap);

		switch (cap & 0xff) {
		case 1:
			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
			break;
		case 0: /* Illegal reserved cap, set cap=0 so we exit */
			cap = 0;
			fallthrough;
		default:
			dev_warn(&pdev->dev,
				 "EHCI: unrecognized capability %02x\n",
				 cap & 0xff);
		}
		offset = (cap >> 8) & 0xff;
	}
	if (!count)
		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");

	/*
	 * halt EHCI & disable its interrupts in any case
	 */
	val = readl(op_reg_base + EHCI_USBSTS);
	if ((val & EHCI_USBSTS_HALTED) == 0) {
		val = readl(op_reg_base + EHCI_USBCMD);
		val &= ~EHCI_USBCMD_RUN;
		writel(val, op_reg_base + EHCI_USBCMD);

		wait_time = 2000;
		do {
			writel(0x3f, op_reg_base + EHCI_USBSTS);
			udelay(100);
			wait_time -= 100;
			val = readl(op_reg_base + EHCI_USBSTS);
			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
				break;
			}
		} while (wait_time > 0);
	}
	writel(0, op_reg_base + EHCI_USBINTR);
	writel(0x3f, op_reg_base + EHCI_USBSTS);

	iounmap(base);
}

/*
 * handshake - spin reading a register until handshake completes
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @wait_usec: timeout in microseconds
 * @delay_usec: delay in microseconds to wait between polling
 *
 * Polls a register every delay_usec microseconds.
 * Returns 0 when the mask bits have the value done.
 * Returns -ETIMEDOUT if this condition is not true after
 * wait_usec microseconds have passed.
 */
static int handshake(void __iomem *ptr, u32 mask, u32 done,
		int wait_usec, int delay_usec)
{
	u32	result;

	return readl_poll_timeout_atomic(ptr, result,
					 ((result & mask) == done),
					 delay_usec, wait_usec);
}
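/* handshake() is used by quirk_usb_handoff_xhci() below to wait for the BIOS ownership semaphore to clear. */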

/*
 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
 * share some number of ports.  These ports can be switched between the two
 * controllers.  Not all of the ports under the EHCI host controller may be
 * switchable.
 *
 * The ports should be switched over to xHCI before PCI probes for any device
 * start.  This avoids active devices under EHCI being disconnected during the
 * port switchover, which could cause loss of data on USB storage devices, or
 * failed boot when the root file system is on a USB mass storage device and is
 * enumerated under EHCI first.
 *
 * We write into the xHC's PCI configuration space in some Intel-specific
 * registers to switch the ports over.  The USB 3.0 terminations and the USB
 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
 * terminations before switching the USB 2.0 wires over, so that USB 3.0
 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
 */
void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
{
	u32		ports_available;
	bool		ehci_found = false;
	struct pci_dev	*companion = NULL;

	/* Sony VAIO t-series with subsystem device ID 90a8 is not capable of
	 * switching ports from EHCI to xHCI
	 */
	if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY &&
	    xhci_pdev->subsystem_device == 0x90a8)
		return;

	/* make sure an intel EHCI controller exists */
	for_each_pci_dev(companion) {
		if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
		    companion->vendor == PCI_VENDOR_ID_INTEL) {
			ehci_found = true;
			break;
		}
	}

	if (!ehci_found)
		return;

	/* Don't switch the ports over if the user hasn't compiled the xHCI
	 * driver.  Otherwise they will see "dead" USB ports that don't power
	 * the devices.
	 */
	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
		dev_warn(&xhci_pdev->dev,
			 "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n");
		dev_warn(&xhci_pdev->dev,
				"USB 3.0 devices will work at USB 2.0 speeds.\n");
		usb_disable_xhci_ports(xhci_pdev);
		return;
	}

	/* Read USB3PRM, the USB 3.0 Port Routing Mask Register
	 * Indicate the ports that can be changed from OS.
	 */
	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
			&ports_available);

	dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
			ports_available);

	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
	 * Register, to turn on SuperSpeed terminations for the
	 * switchable ports.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			ports_available);

	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			&ports_available);
	dev_dbg(&xhci_pdev->dev,
		"USB 3.0 ports that are now enabled under xHCI: 0x%x\n",
		ports_available);

	/* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register
	 * Indicate the USB 2.0 ports to be controlled by the xHCI host.
	 */

	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
			&ports_available);

	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
			ports_available);

	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
	 * switch the USB 2.0 power and data lines over to the xHCI
	 * host.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			ports_available);

	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			&ports_available);
	dev_dbg(&xhci_pdev->dev,
		"USB 2.0 ports that are now switched over to xHCI: 0x%x\n",
		ports_available);
}
EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
1123*4882a593Smuzhiyun 
usb_disable_xhci_ports(struct pci_dev * xhci_pdev)1124*4882a593Smuzhiyun void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
1125*4882a593Smuzhiyun {
1126*4882a593Smuzhiyun 	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
1127*4882a593Smuzhiyun 	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
1128*4882a593Smuzhiyun }
1129*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun /*
1132*4882a593Smuzhiyun  * PCI Quirks for xHCI.
1133*4882a593Smuzhiyun  *
1134*4882a593Smuzhiyun  * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
1135*4882a593Smuzhiyun  * It signals to the BIOS that the OS wants control of the host controller,
1136*4882a593Smuzhiyun  * and then waits 1 second for the BIOS to hand over control.
1137*4882a593Smuzhiyun  * If we time out, assume the BIOS is broken and take control anyway.
1138*4882a593Smuzhiyun  */
1139*4882a593Smuzhiyun static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun 	void __iomem *base;
1142*4882a593Smuzhiyun 	int ext_cap_offset;
1143*4882a593Smuzhiyun 	void __iomem *op_reg_base;
1144*4882a593Smuzhiyun 	u32 val;
1145*4882a593Smuzhiyun 	int timeout;
1146*4882a593Smuzhiyun 	int len = pci_resource_len(pdev, 0);
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	if (!mmio_resource_enabled(pdev, 0))
1149*4882a593Smuzhiyun 		return;
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	base = ioremap(pci_resource_start(pdev, 0), len);
1152*4882a593Smuzhiyun 	if (base == NULL)
1153*4882a593Smuzhiyun 		return;
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	/*
1156*4882a593Smuzhiyun 	 * Find the Legacy Support Capability register -
1157*4882a593Smuzhiyun 	 * this is optional for xHCI host controllers.
1158*4882a593Smuzhiyun 	 */
1159*4882a593Smuzhiyun 	ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY);
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	if (!ext_cap_offset)
1162*4882a593Smuzhiyun 		goto hc_init;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	if ((ext_cap_offset + sizeof(val)) > len) {
1165*4882a593Smuzhiyun 		/* We're reading garbage from the controller */
1166*4882a593Smuzhiyun 		dev_warn(&pdev->dev, "xHCI controller failing to respond\n");
1167*4882a593Smuzhiyun 		goto iounmap;
1168*4882a593Smuzhiyun 	}
1169*4882a593Smuzhiyun 	val = readl(base + ext_cap_offset);
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	/* Auto handoff never worked for these devices. Force it and continue */
1172*4882a593Smuzhiyun 	if ((pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) ||
1173*4882a593Smuzhiyun 			(pdev->vendor == PCI_VENDOR_ID_RENESAS
1174*4882a593Smuzhiyun 			 && pdev->device == 0x0014)) {
1175*4882a593Smuzhiyun 		val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED;
1176*4882a593Smuzhiyun 		writel(val, base + ext_cap_offset);
1177*4882a593Smuzhiyun 	}
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
1180*4882a593Smuzhiyun 	if (val & XHCI_HC_BIOS_OWNED) {
1181*4882a593Smuzhiyun 		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 		/* Wait for 1 second with 10 microsecond polling interval */
1184*4882a593Smuzhiyun 		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1185*4882a593Smuzhiyun 				0, 1000000, 10);
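		/*
		 * Per the xHCI legacy-support handoff protocol, the BIOS is
		 * expected to clear XHCI_HC_BIOS_OWNED once it has quiesced
		 * the controller; handshake() polls for that and returns
		 * nonzero if the bit is still set when the one-second wait
		 * expires.
		 */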
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 		/* Assume a buggy BIOS and take HC ownership anyway */
1188*4882a593Smuzhiyun 		if (timeout) {
1189*4882a593Smuzhiyun 			dev_warn(&pdev->dev,
1190*4882a593Smuzhiyun 				 "xHCI BIOS handoff failed (BIOS bug?) %08x\n",
1191*4882a593Smuzhiyun 				 val);
1192*4882a593Smuzhiyun 			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
1193*4882a593Smuzhiyun 		}
1194*4882a593Smuzhiyun 	}
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
1197*4882a593Smuzhiyun 	/* Mask off (turn off) any enabled SMIs */
1198*4882a593Smuzhiyun 	val &= XHCI_LEGACY_DISABLE_SMI;
1199*4882a593Smuzhiyun 	/* Mask all SMI events bits, RW1C */
1200*4882a593Smuzhiyun 	val |= XHCI_LEGACY_SMI_EVENTS;
1201*4882a593Smuzhiyun 	/* Disable any BIOS SMIs and clear all SMI events */
1202*4882a593Smuzhiyun 	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
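	/*
	 * The event bits are write-1-to-clear, so the single writel() above
	 * both disables further BIOS SMI generation and acknowledges any SMI
	 * events that were already pending.
	 */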
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun hc_init:
1205*4882a593Smuzhiyun 	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
1206*4882a593Smuzhiyun 		usb_enable_intel_xhci_ports(pdev);
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	/* Wait for the host controller to be ready before writing any
1211*4882a593Smuzhiyun 	 * operational or runtime registers.  Wait 5 seconds and no more.
1212*4882a593Smuzhiyun 	 */
1213*4882a593Smuzhiyun 	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1214*4882a593Smuzhiyun 			5000000, 10);
1215*4882a593Smuzhiyun 	/* Assume a buggy HC and start HC initialization anyway */
1216*4882a593Smuzhiyun 	if (timeout) {
1217*4882a593Smuzhiyun 		val = readl(op_reg_base + XHCI_STS_OFFSET);
1218*4882a593Smuzhiyun 		dev_warn(&pdev->dev,
1219*4882a593Smuzhiyun 			 "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n",
1220*4882a593Smuzhiyun 			 val);
1221*4882a593Smuzhiyun 	}
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	/* Send the halt and disable interrupts command */
1224*4882a593Smuzhiyun 	val = readl(op_reg_base + XHCI_CMD_OFFSET);
1225*4882a593Smuzhiyun 	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
1226*4882a593Smuzhiyun 	writel(val, op_reg_base + XHCI_CMD_OFFSET);
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
1229*4882a593Smuzhiyun 	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
1230*4882a593Smuzhiyun 			XHCI_MAX_HALT_USEC, 125);
1231*4882a593Smuzhiyun 	if (timeout) {
1232*4882a593Smuzhiyun 		val = readl(op_reg_base + XHCI_STS_OFFSET);
1233*4882a593Smuzhiyun 		dev_warn(&pdev->dev,
1234*4882a593Smuzhiyun 			 "xHCI HW did not halt within %d usec status = 0x%x\n",
1235*4882a593Smuzhiyun 			 XHCI_MAX_HALT_USEC, val);
1236*4882a593Smuzhiyun 	}
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun iounmap:
1239*4882a593Smuzhiyun 	iounmap(base);
1240*4882a593Smuzhiyun }
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun static void quirk_usb_early_handoff(struct pci_dev *pdev)
1243*4882a593Smuzhiyun {
1244*4882a593Smuzhiyun 	struct device_node *parent;
1245*4882a593Smuzhiyun 	bool is_rpi;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	/* Skip the NetLogic MIPS SoC's internal PCI USB controller.
1248*4882a593Smuzhiyun 	 * This device does not need/support EHCI/OHCI handoff.
1249*4882a593Smuzhiyun 	 */
1250*4882a593Smuzhiyun 	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
1251*4882a593Smuzhiyun 		return;
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	/*
1254*4882a593Smuzhiyun 	 * Bypass the Raspberry Pi 4's xHCI controller; the handoff is
1255*4882a593Smuzhiyun 	 * taken care of by the board's co-processor.
1256*4882a593Smuzhiyun 	 */
1257*4882a593Smuzhiyun 	if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
1258*4882a593Smuzhiyun 		parent = of_get_parent(pdev->bus->dev.of_node);
1259*4882a593Smuzhiyun 		is_rpi = of_device_is_compatible(parent, "brcm,bcm2711-pcie");
1260*4882a593Smuzhiyun 		of_node_put(parent);
1261*4882a593Smuzhiyun 		if (is_rpi)
1262*4882a593Smuzhiyun 			return;
1263*4882a593Smuzhiyun 	}
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
1266*4882a593Smuzhiyun 			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
1267*4882a593Smuzhiyun 			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
1268*4882a593Smuzhiyun 			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
1269*4882a593Smuzhiyun 		return;
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	if (pci_enable_device(pdev) < 0) {
1272*4882a593Smuzhiyun 		dev_warn(&pdev->dev,
1273*4882a593Smuzhiyun 			 "Can't enable PCI device, BIOS handoff failed.\n");
1274*4882a593Smuzhiyun 		return;
1275*4882a593Smuzhiyun 	}
1276*4882a593Smuzhiyun 	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
1277*4882a593Smuzhiyun 		quirk_usb_handoff_uhci(pdev);
1278*4882a593Smuzhiyun 	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
1279*4882a593Smuzhiyun 		quirk_usb_handoff_ohci(pdev);
1280*4882a593Smuzhiyun 	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
1281*4882a593Smuzhiyun 		quirk_usb_disable_ehci(pdev);
1282*4882a593Smuzhiyun 	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
1283*4882a593Smuzhiyun 		quirk_usb_handoff_xhci(pdev);
1284*4882a593Smuzhiyun 	pci_disable_device(pdev);
1285*4882a593Smuzhiyun }
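/*
 * This fixup runs for every PCI device in class PCI_CLASS_SERIAL_USB; the
 * class_shift of 8 makes the match ignore the programming-interface byte,
 * so UHCI, OHCI, EHCI and xHCI controllers all reach
 * quirk_usb_early_handoff(), which dispatches on the exact interface value.
 */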
1286*4882a593Smuzhiyun DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1287*4882a593Smuzhiyun 			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
1288