/*
 * USB HOST XHCI Controller
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #ifndef HOST_XHCI_H_
18*4882a593Smuzhiyun #define HOST_XHCI_H_
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #include <asm/types.h>
21*4882a593Smuzhiyun #include <asm/cache.h>
22*4882a593Smuzhiyun #include <asm/io.h>
23*4882a593Smuzhiyun #include <linux/list.h>
24*4882a593Smuzhiyun #include <linux/compat.h>
25*4882a593Smuzhiyun
/* Highest valid endpoint context index in a device context (EP1 OUT..EP15 IN) */
#define MAX_EP_CTX_NUM		31
/* Alignment required for rings, contexts and other xHC data structures */
#define XHCI_ALIGNMENT		64
/* Generic timeout for XHCI events, in milliseconds */
#define XHCI_TIMEOUT		5000
/* Max number of USB devices for any host controller - limit in section 6.1 */
#define MAX_HC_SLOTS		256
/* Section 5.3.3 - MaxPorts */
#define MAX_HC_PORTS		255

/* Up to 16 ms to halt an HC */
#define XHCI_MAX_HALT_USEC	(16*1000)

/* Up to 250 ms for an HC to complete reset (CNR to clear) */
#define XHCI_MAX_RESET_USEC	(250*1000)
39*4882a593Smuzhiyun
/*
 * These bits are Read Only (RO) and should be saved and written to the
 * registers: 0, 3, 10:13, 30
 * connect status, over-current status, port speed, and device removable.
 * connect status and port speed are also sticky - meaning they're in
 * the AUX well and they aren't changed by a hot, warm, or cold reset.
 */
#define XHCI_PORT_RO	((1 << 0) | (1 << 3) | (0xf << 10) | (1 << 30))
/*
 * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
 * bits 5:8, 9, 14:15, 25:27
 * link state, port power, port indicator state, "wake on" enable state
 */
#define XHCI_PORT_RWS	((0xf << 5) | (1 << 9) | (0x3 << 14) | (0x7 << 25))
/*
 * These bits are RW; writing a 1 sets the bit, writing a 0 has no effect:
 * bit 4 (port reset)
 */
#define XHCI_PORT_RW1S	((1 << 4))
/*
 * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
 * bits 1, 17, 18, 19, 20, 21, 22, 23
 * port enable/disable, and
 * change bits: connect, PED,
 * warm port reset changed (reserved zero for USB 2.0 ports),
 * over-current, reset, link state, and L1 change
 */
#define XHCI_PORT_RW1CS	((1 << 1) | (0x7f << 17))
/*
 * Bit 16 is RW, and writing a '1' to it causes the link state control to be
 * latched in
 */
#define XHCI_PORT_RW	((1 << 16))
/*
 * These bits are Reserved Zero (RsvdZ) and zero should be written to them:
 * bits 2, 24, 28:31
 */
#define XHCI_PORT_RZ	((1 << 2) | (1 << 24) | (0xf << 28))
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun /*
80*4882a593Smuzhiyun * XHCI Register Space.
81*4882a593Smuzhiyun */
82*4882a593Smuzhiyun struct xhci_hccr {
83*4882a593Smuzhiyun uint32_t cr_capbase;
84*4882a593Smuzhiyun uint32_t cr_hcsparams1;
85*4882a593Smuzhiyun uint32_t cr_hcsparams2;
86*4882a593Smuzhiyun uint32_t cr_hcsparams3;
87*4882a593Smuzhiyun uint32_t cr_hccparams;
88*4882a593Smuzhiyun uint32_t cr_dboff;
89*4882a593Smuzhiyun uint32_t cr_rtsoff;
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun /* hc_capbase bitmasks */
92*4882a593Smuzhiyun /* bits 7:0 - how long is the Capabilities register */
93*4882a593Smuzhiyun #define HC_LENGTH(p) XHCI_HC_LENGTH(p)
94*4882a593Smuzhiyun /* bits 31:16 */
95*4882a593Smuzhiyun #define HC_VERSION(p) (((p) >> 16) & 0xffff)
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun /* HCSPARAMS1 - hcs_params1 - bitmasks */
98*4882a593Smuzhiyun /* bits 0:7, Max Device Slots */
99*4882a593Smuzhiyun #define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
100*4882a593Smuzhiyun #define HCS_SLOTS_MASK 0xff
101*4882a593Smuzhiyun /* bits 8:18, Max Interrupters */
102*4882a593Smuzhiyun #define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
103*4882a593Smuzhiyun /* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
104*4882a593Smuzhiyun #define HCS_MAX_PORTS_SHIFT 24
105*4882a593Smuzhiyun #define HCS_MAX_PORTS_MASK (0xff << HCS_MAX_PORTS_SHIFT)
106*4882a593Smuzhiyun #define HCS_MAX_PORTS(p) (((p) >> 24) & 0xff)
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun /* HCSPARAMS2 - hcs_params2 - bitmasks */
109*4882a593Smuzhiyun /* bits 0:3, frames or uframes that SW needs to queue transactions
110*4882a593Smuzhiyun * ahead of the HW to meet periodic deadlines */
111*4882a593Smuzhiyun #define HCS_IST(p) (((p) >> 0) & 0xf)
112*4882a593Smuzhiyun /* bits 4:7, max number of Event Ring segments */
113*4882a593Smuzhiyun #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
114*4882a593Smuzhiyun /* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
115*4882a593Smuzhiyun /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
116*4882a593Smuzhiyun /* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
117*4882a593Smuzhiyun #define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun /* HCSPARAMS3 - hcs_params3 - bitmasks */
120*4882a593Smuzhiyun /* bits 0:7, Max U1 to U0 latency for the roothub ports */
121*4882a593Smuzhiyun #define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
122*4882a593Smuzhiyun /* bits 16:31, Max U2 to U0 latency for the roothub ports */
123*4882a593Smuzhiyun #define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun /* HCCPARAMS - hcc_params - bitmasks */
126*4882a593Smuzhiyun /* true: HC can use 64-bit address pointers */
127*4882a593Smuzhiyun #define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
128*4882a593Smuzhiyun /* true: HC can do bandwidth negotiation */
129*4882a593Smuzhiyun #define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
130*4882a593Smuzhiyun /* true: HC uses 64-byte Device Context structures
131*4882a593Smuzhiyun * FIXME 64-byte context structures aren't supported yet.
132*4882a593Smuzhiyun */
133*4882a593Smuzhiyun #define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
134*4882a593Smuzhiyun /* true: HC has port power switches */
135*4882a593Smuzhiyun #define HCC_PPC(p) ((p) & (1 << 3))
136*4882a593Smuzhiyun /* true: HC has port indicators */
137*4882a593Smuzhiyun #define HCS_INDICATOR(p) ((p) & (1 << 4))
138*4882a593Smuzhiyun /* true: HC has Light HC Reset Capability */
139*4882a593Smuzhiyun #define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
140*4882a593Smuzhiyun /* true: HC supports latency tolerance messaging */
141*4882a593Smuzhiyun #define HCC_LTC(p) ((p) & (1 << 6))
142*4882a593Smuzhiyun /* true: no secondary Stream ID Support */
143*4882a593Smuzhiyun #define HCC_NSS(p) ((p) & (1 << 7))
144*4882a593Smuzhiyun /* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
145*4882a593Smuzhiyun #define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
146*4882a593Smuzhiyun /* Extended Capabilities pointer from PCI base - section 5.3.6 */
147*4882a593Smuzhiyun #define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun /* db_off bitmask - bits 0:1 reserved */
150*4882a593Smuzhiyun #define DBOFF_MASK (~0x3)
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun /* run_regs_off bitmask - bits 0:4 reserved */
153*4882a593Smuzhiyun #define RTSOFF_MASK (~0x1f)
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun };
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun struct xhci_hcor_port_regs {
158*4882a593Smuzhiyun volatile uint32_t or_portsc;
159*4882a593Smuzhiyun volatile uint32_t or_portpmsc;
160*4882a593Smuzhiyun volatile uint32_t or_portli;
161*4882a593Smuzhiyun volatile uint32_t reserved_3;
162*4882a593Smuzhiyun };
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun struct xhci_hcor {
165*4882a593Smuzhiyun volatile uint32_t or_usbcmd;
166*4882a593Smuzhiyun volatile uint32_t or_usbsts;
167*4882a593Smuzhiyun volatile uint32_t or_pagesize;
168*4882a593Smuzhiyun volatile uint32_t reserved_0[2];
169*4882a593Smuzhiyun volatile uint32_t or_dnctrl;
170*4882a593Smuzhiyun volatile uint64_t or_crcr;
171*4882a593Smuzhiyun volatile uint32_t reserved_1[4];
172*4882a593Smuzhiyun volatile uint64_t or_dcbaap;
173*4882a593Smuzhiyun volatile uint32_t or_config;
174*4882a593Smuzhiyun volatile uint32_t reserved_2[241];
175*4882a593Smuzhiyun struct xhci_hcor_port_regs portregs[MAX_HC_PORTS];
176*4882a593Smuzhiyun };
177*4882a593Smuzhiyun
/* USBCMD - USB command - command bitmasks */
/* start/stop HC execution - do not write unless HC is halted*/
#define CMD_RUN		XHCI_CMD_RUN
/* Reset HC - resets internal HC state machine and all registers (except
 * PCI config regs). HC does NOT drive a USB reset on the downstream ports.
 * The xHCI driver must reinitialize the xHC after setting this bit.
 */
#define CMD_RESET	(1 << 1)
/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
#define CMD_EIE		XHCI_CMD_EIE
/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
#define CMD_HSEIE	XHCI_CMD_HSEIE
/* bits 4:6 are reserved (and should be preserved on writes). */
/* light reset (port status stays unchanged) - reset completed when this is 0 */
#define CMD_LRESET	(1 << 7)
/* host controller save/restore state. */
#define CMD_CSS		(1 << 8)
#define CMD_CRS		(1 << 9)
/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
#define CMD_EWE		XHCI_CMD_EWE
/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
 * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
 * '0' means the xHC can power it off if all ports are in the disconnect,
 * disabled, or powered-off state.
 */
#define CMD_PM_INDEX	(1 << 11)
/* bits 12:31 are reserved (and should be preserved on writes). */

/* USBSTS - USB status - status bitmasks */
/* HC not running - set to 1 when run/stop bit is cleared. */
#define STS_HALT	XHCI_STS_HALT
/* serious error, e.g. PCI parity error. The HC will clear the run/stop bit. */
#define STS_FATAL	(1 << 2)
/* event interrupt - clear this prior to clearing any IP flags in IR set*/
#define STS_EINT	(1 << 3)
/* port change detect */
#define STS_PORT	(1 << 4)
/* bits 5:7 reserved and zeroed */
/* save state status - '1' means xHC is saving state */
#define STS_SAVE	(1 << 8)
/* restore state status - '1' means xHC is restoring state */
#define STS_RESTORE	(1 << 9)
/* true: save or restore error */
#define STS_SRE		(1 << 10)
/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
#define STS_CNR	XHCI_STS_CNR
/* true: internal Host Controller Error - SW needs to reset and reinitialize */
#define STS_HCE		(1 << 12)
/* bits 13:31 reserved and should be preserved */

/*
 * DNCTRL - Device Notification Control Register - dev_notification bitmasks
 * Generate a device notification event when the HC sees a transaction with a
 * notification type that matches a bit set in this bit field.
 */
#define DEV_NOTE_MASK		(0xffff)
#define ENABLE_DEV_NOTE(x)	(1 << (x))
/* Most of the device notification types should only be used for debug.
 * SW does need to pay attention to function wake notifications.
 */
#define DEV_NOTE_FWAKE		ENABLE_DEV_NOTE(1)

/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
/* bit 0 is the command ring cycle state */
/* stop ring operation after completion of the currently executing command */
#define CMD_RING_PAUSE		(1 << 1)
/* stop ring immediately - abort the currently executing command */
#define CMD_RING_ABORT		(1 << 2)
/* true: command ring is running */
#define CMD_RING_RUNNING	(1 << 3)
/* bits 4:5 reserved and should be preserved */
/* Command Ring pointer - bit mask for the lower 32 bits. */
#define CMD_RING_RSVD_BITS	(0x3f)
251*4882a593Smuzhiyun
/* CONFIG - Configure Register - config_reg bitmasks */
/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
#define MAX_DEVS(p)	((p) & 0xff)
/* bits 8:31 - reserved and should be preserved */

/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
/* true: device connected */
#define PORT_CONNECT	(1 << 0)
/* true: port enabled */
#define PORT_PE		(1 << 1)
/* bit 2 reserved and zeroed */
/* true: port has an over-current condition */
#define PORT_OC		(1 << 3)
/* true: port reset signaling asserted */
#define PORT_RESET	(1 << 4)
/* Port Link State - bits 5:8
 * A read gives the current link PM state of the port,
 * a write with Link State Write Strobe set sets the link state.
 */
#define PORT_PLS_MASK	(0xf << 5)
#define XDEV_U0		(0x0 << 5)
#define XDEV_U2		(0x2 << 5)
#define XDEV_U3		(0x3 << 5)
#define XDEV_RESUME	(0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER	(1 << 9)
/* bits 10:13 indicate device speed:
 * 0 - undefined speed - port hasn't be initialized by a reset yet
 * 1 - full speed
 * 2 - low speed
 * 3 - high speed
 * 4 - super speed
 * 5-15 reserved
 */
#define DEV_SPEED_MASK		(0xf << 10)
#define XDEV_FS			(0x1 << 10)
#define XDEV_LS			(0x2 << 10)
#define XDEV_HS			(0x3 << 10)
#define XDEV_SS			(0x4 << 10)
#define DEV_UNDEFSPEED(p)	(((p) & DEV_SPEED_MASK) == (0x0<<10))
#define DEV_FULLSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_FS)
#define DEV_LOWSPEED(p)		(((p) & DEV_SPEED_MASK) == XDEV_LS)
#define DEV_HIGHSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_HS)
#define DEV_SUPERSPEED(p)	(((p) & DEV_SPEED_MASK) == XDEV_SS)
/* Bits 20:23 in the Slot Context are the speed for the device */
#define SLOT_SPEED_FS		(XDEV_FS << 10)
#define SLOT_SPEED_LS		(XDEV_LS << 10)
#define SLOT_SPEED_HS		(XDEV_HS << 10)
#define SLOT_SPEED_SS		(XDEV_SS << 10)
/* Port Indicator Control */
#define PORT_LED_OFF	(0 << 14)
#define PORT_LED_AMBER	(1 << 14)
#define PORT_LED_GREEN	(2 << 14)
#define PORT_LED_MASK	(3 << 14)
/* Port Link State Write Strobe - set this when changing link state */
#define PORT_LINK_STROBE	(1 << 16)
/* true: connect status change */
#define PORT_CSC	(1 << 17)
/* true: port enable change */
#define PORT_PEC	(1 << 18)
/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
 * into an enabled state, and the device into the default state. A "warm" reset
 * also resets the link, forcing the device through the link training sequence.
 * SW can also look at the Port Reset register to see when warm reset is done.
 */
#define PORT_WRC	(1 << 19)
/* true: over-current change */
#define PORT_OCC	(1 << 20)
/* true: reset change - 1 to 0 transition of PORT_RESET */
#define PORT_RC		(1 << 21)
/* port link status change - set on some port link state transitions:
 * Transition				Reason
 * --------------------------------------------------------------------------
 * - U3 to Resume			Wakeup signaling from a device
 * - Resume to Recovery to U0		USB 3.0 device resume
 * - Resume to U0			USB 2.0 device resume
 * - U3 to Recovery to U0		Software resume of USB 3.0 device complete
 * - U3 to U0				Software resume of USB 2.0 device complete
 * - U2 to U0				L1 resume of USB 2.1 device complete
 * - U0 to U0 (???)			L1 entry rejection by USB 2.1 device
 * - U0 to disabled			L1 entry error with USB 2.1 device
 * - Any state to inactive		Error on USB 3.0 port
 */
#define PORT_PLC	(1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC	(1 << 23)
/* bit 24 reserved */
/* wake on connect (enable) */
#define PORT_WKCONN_E	(1 << 25)
/* wake on disconnect (enable) */
#define PORT_WKDISC_E	(1 << 26)
/* wake on over-current (enable) */
#define PORT_WKOC_E	(1 << 27)
/* bits 28:29 reserved */
/* true: device is removable - for USB 3.0 roothub emulation */
#define PORT_DEV_REMOVE	(1 << 30)
/* Initiate a warm port reset - complete when PORT_WRC is '1' */
#define PORT_WR		(1 << 31)

/* We mark duplicate entries with -1 */
#define DUPLICATE_ENTRY ((u8)(-1))

/* Port Power Management Status and Control - port_power_base bitmasks */
/* Inactivity timer value for transitions into U1, in microseconds.
 * Timeout can be up to 127us. 0xFF means an infinite timeout.
 */
#define PORT_U1_TIMEOUT(p)	((p) & 0xff)
/* Inactivity timer value for transitions into U2 */
#define PORT_U2_TIMEOUT(p)	(((p) & 0xff) << 8)
/* Bits 24:31 for port testing */

/* USB2 Protocol PORTSPMSC */
#define PORT_L1S_MASK		7
#define PORT_L1S_SUCCESS	1
#define PORT_RWE		(1 << 3)
#define PORT_HIRD(p)		(((p) & 0xf) << 4)
#define PORT_HIRD_MASK		(0xf << 4)
#define PORT_L1DS(p)		(((p) & 0xff) << 8)
#define PORT_HLE		(1 << 16)
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun /**
373*4882a593Smuzhiyun * struct xhci_intr_reg - Interrupt Register Set
374*4882a593Smuzhiyun * @irq_pending: IMAN - Interrupt Management Register. Used to enable
375*4882a593Smuzhiyun * interrupts and check for pending interrupts.
376*4882a593Smuzhiyun * @irq_control: IMOD - Interrupt Moderation Register.
377*4882a593Smuzhiyun * Used to throttle interrupts.
378*4882a593Smuzhiyun * @erst_size: Number of segments in the
379*4882a593Smuzhiyun Event Ring Segment Table (ERST).
380*4882a593Smuzhiyun * @erst_base: ERST base address.
381*4882a593Smuzhiyun * @erst_dequeue: Event ring dequeue pointer.
382*4882a593Smuzhiyun *
383*4882a593Smuzhiyun * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
384*4882a593Smuzhiyun * Ring Segment Table (ERST) associated with it.
385*4882a593Smuzhiyun * The event ring is comprised of multiple segments of the same size.
386*4882a593Smuzhiyun * The HC places events on the ring and "updates the Cycle bit in the TRBs to
387*4882a593Smuzhiyun * indicate to software the current position of the Enqueue Pointer."
388*4882a593Smuzhiyun * The HCD (Linux) processes those events and updates the dequeue pointer.
389*4882a593Smuzhiyun */
390*4882a593Smuzhiyun struct xhci_intr_reg {
391*4882a593Smuzhiyun volatile __le32 irq_pending;
392*4882a593Smuzhiyun volatile __le32 irq_control;
393*4882a593Smuzhiyun volatile __le32 erst_size;
394*4882a593Smuzhiyun volatile __le32 rsvd;
395*4882a593Smuzhiyun volatile __le64 erst_base;
396*4882a593Smuzhiyun volatile __le64 erst_dequeue;
397*4882a593Smuzhiyun };
398*4882a593Smuzhiyun
/* irq_pending bitmasks */
#define	ER_IRQ_PENDING(p)	((p) & 0x1)
/* bits 2:31 need to be preserved */
/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
#define	ER_IRQ_CLEAR(p)		((p) & 0xfffffffe)
#define	ER_IRQ_ENABLE(p)	((ER_IRQ_CLEAR(p)) | 0x2)
#define	ER_IRQ_DISABLE(p)	((ER_IRQ_CLEAR(p)) & ~(0x2))

/* irq_control bitmasks */
/* Minimum interval between interrupts (in 250ns intervals). The interval
 * between interrupts will be longer if there are no events on the event ring.
 * Default is 4000 (1 ms).
 */
#define ER_IRQ_INTERVAL_MASK	(0xffff)
/* Counter used to count down the time to the next interrupt - HW use only */
#define ER_IRQ_COUNTER_MASK	(0xffff << 16)

/* erst_size bitmasks */
/* Preserve bits 16:31 of erst_size */
#define	ERST_SIZE_MASK		(0xffff << 16)

/* erst_dequeue bitmasks */
/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
 * where the current dequeue pointer lies. This is an optional HW hint.
 */
#define ERST_DESI_MASK		(0x7)
/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
 * a work queue (or delayed service routine)?
 */
#define ERST_EHB		(1 << 3)
#define ERST_PTR_MASK		(0xf)
430*4882a593Smuzhiyun
431*4882a593Smuzhiyun /**
432*4882a593Smuzhiyun * struct xhci_run_regs
433*4882a593Smuzhiyun * @microframe_index: MFINDEX - current microframe number
434*4882a593Smuzhiyun *
435*4882a593Smuzhiyun * Section 5.5 Host Controller Runtime Registers:
436*4882a593Smuzhiyun * "Software should read and write these registers using only Dword (32 bit)
437*4882a593Smuzhiyun * or larger accesses"
438*4882a593Smuzhiyun */
439*4882a593Smuzhiyun struct xhci_run_regs {
440*4882a593Smuzhiyun __le32 microframe_index;
441*4882a593Smuzhiyun __le32 rsvd[7];
442*4882a593Smuzhiyun struct xhci_intr_reg ir_set[128];
443*4882a593Smuzhiyun };
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun /**
446*4882a593Smuzhiyun * struct doorbell_array
447*4882a593Smuzhiyun *
448*4882a593Smuzhiyun * Bits 0 - 7: Endpoint target
449*4882a593Smuzhiyun * Bits 8 - 15: RsvdZ
450*4882a593Smuzhiyun * Bits 16 - 31: Stream ID
451*4882a593Smuzhiyun *
452*4882a593Smuzhiyun * Section 5.6
453*4882a593Smuzhiyun */
454*4882a593Smuzhiyun struct xhci_doorbell_array {
455*4882a593Smuzhiyun volatile __le32 doorbell[256];
456*4882a593Smuzhiyun };
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun #define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
459*4882a593Smuzhiyun #define DB_VALUE_HOST 0x00000000
460*4882a593Smuzhiyun
461*4882a593Smuzhiyun /**
462*4882a593Smuzhiyun * struct xhci_protocol_caps
463*4882a593Smuzhiyun * @revision: major revision, minor revision, capability ID,
464*4882a593Smuzhiyun * and next capability pointer.
465*4882a593Smuzhiyun * @name_string: Four ASCII characters to say which spec this xHC
466*4882a593Smuzhiyun * follows, typically "USB ".
467*4882a593Smuzhiyun * @port_info: Port offset, count, and protocol-defined information.
468*4882a593Smuzhiyun */
469*4882a593Smuzhiyun struct xhci_protocol_caps {
470*4882a593Smuzhiyun u32 revision;
471*4882a593Smuzhiyun u32 name_string;
472*4882a593Smuzhiyun u32 port_info;
473*4882a593Smuzhiyun };
474*4882a593Smuzhiyun
475*4882a593Smuzhiyun #define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
476*4882a593Smuzhiyun #define XHCI_EXT_PORT_OFF(x) ((x) & 0xff)
477*4882a593Smuzhiyun #define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun /**
480*4882a593Smuzhiyun * struct xhci_container_ctx
481*4882a593Smuzhiyun * @type: Type of context. Used to calculated offsets to contained contexts.
482*4882a593Smuzhiyun * @size: Size of the context data
483*4882a593Smuzhiyun * @bytes: The raw context data given to HW
484*4882a593Smuzhiyun *
485*4882a593Smuzhiyun * Represents either a Device or Input context. Holds a pointer to the raw
486*4882a593Smuzhiyun * memory used for the context (bytes).
487*4882a593Smuzhiyun */
488*4882a593Smuzhiyun struct xhci_container_ctx {
489*4882a593Smuzhiyun unsigned type;
490*4882a593Smuzhiyun #define XHCI_CTX_TYPE_DEVICE 0x1
491*4882a593Smuzhiyun #define XHCI_CTX_TYPE_INPUT 0x2
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun int size;
494*4882a593Smuzhiyun u8 *bytes;
495*4882a593Smuzhiyun };
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun /**
498*4882a593Smuzhiyun * struct xhci_slot_ctx
499*4882a593Smuzhiyun * @dev_info: Route string, device speed, hub info, and last valid endpoint
500*4882a593Smuzhiyun * @dev_info2: Max exit latency for device number, root hub port number
501*4882a593Smuzhiyun * @tt_info: tt_info is used to construct split transaction tokens
502*4882a593Smuzhiyun * @dev_state: slot state and device address
503*4882a593Smuzhiyun *
504*4882a593Smuzhiyun * Slot Context - section 6.2.1.1. This assumes the HC uses 32-byte context
505*4882a593Smuzhiyun * structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
506*4882a593Smuzhiyun * reserved at the end of the slot context for HC internal use.
507*4882a593Smuzhiyun */
508*4882a593Smuzhiyun struct xhci_slot_ctx {
509*4882a593Smuzhiyun __le32 dev_info;
510*4882a593Smuzhiyun __le32 dev_info2;
511*4882a593Smuzhiyun __le32 tt_info;
512*4882a593Smuzhiyun __le32 dev_state;
513*4882a593Smuzhiyun /* offset 0x10 to 0x1f reserved for HC internal use */
514*4882a593Smuzhiyun __le32 reserved[4];
515*4882a593Smuzhiyun };
516*4882a593Smuzhiyun
/* dev_info bitmasks */
/* Route String - 0:19 */
#define ROUTE_STRING_MASK	(0xfffff)
/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
#define DEV_SPEED		(0xf << 20)
/* bit 24 reserved */
/* Is this LS/FS device connected through a HS hub? - bit 25 */
#define DEV_MTT			(0x1 << 25)
/* Set if the device is a hub - bit 26 */
#define DEV_HUB			(0x1 << 26)
/* Index of the last valid endpoint context in this device context - 27:31 */
#define LAST_CTX_MASK		(0x1f << 27)
#define LAST_CTX(p)		((p) << 27)
#define LAST_CTX_TO_EP_NUM(p)	(((p) >> 27) - 1)
#define SLOT_FLAG		(1 << 0)
#define EP0_FLAG		(1 << 1)

/* dev_info2 bitmasks */
/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
#define MAX_EXIT			(0xffff)
/* Root hub port number that is needed to access the USB device */
#define ROOT_HUB_PORT(p)		(((p) & 0xff) << 16)
#define ROOT_HUB_PORT_MASK		(0xff)
#define ROOT_HUB_PORT_SHIFT		(16)
#define DEVINFO_TO_ROOT_HUB_PORT(p)	(((p) >> 16) & 0xff)
/* Maximum number of ports under a hub device */
#define XHCI_MAX_PORTS(p)		(((p) & 0xff) << 24)

/* tt_info bitmasks */
/*
 * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
 * The Slot ID of the hub that isolates the high speed signaling from
 * this low or full-speed device. '0' if attached to root hub port.
 */
#define TT_SLOT(p)		(((p) & 0xff) << 0)
/*
 * The number of the downstream facing port of the high-speed hub
 * '0' if the device is not low or full speed.
 */
#define TT_PORT(p)		(((p) & 0xff) << 8)
#define TT_THINK_TIME(p)	(((p) & 0x3) << 16)

/* dev_state bitmasks */
/* USB device address - assigned by the HC */
#define DEV_ADDR_MASK		(0xff)
/* bits 8:26 reserved */
/* Slot state */
#define SLOT_STATE		(0x1f << 27)
#define GET_SLOT_STATE(p)	(((p) & (0x1f << 27)) >> 27)

/* Slot state encodings - section 4.5.3 (Enabled shares encoding 0 with
 * Disabled; the xHC distinguishes them internally) */
#define SLOT_STATE_DISABLED	0
#define SLOT_STATE_ENABLED	SLOT_STATE_DISABLED
#define SLOT_STATE_DEFAULT	1
#define SLOT_STATE_ADDRESSED	2
#define SLOT_STATE_CONFIGURED	3
572*4882a593Smuzhiyun
573*4882a593Smuzhiyun /**
574*4882a593Smuzhiyun * struct xhci_ep_ctx
575*4882a593Smuzhiyun * @ep_info: endpoint state, streams, mult, and interval information.
576*4882a593Smuzhiyun * @ep_info2: information on endpoint type, max packet size, max burst size,
577*4882a593Smuzhiyun * error count, and whether the HC will force an event for all
578*4882a593Smuzhiyun * transactions.
579*4882a593Smuzhiyun * @deq: 64-bit ring dequeue pointer address. If the endpoint only
580*4882a593Smuzhiyun * defines one stream, this points to the endpoint transfer ring.
581*4882a593Smuzhiyun * Otherwise, it points to a stream context array, which has a
582*4882a593Smuzhiyun * ring pointer for each flow.
583*4882a593Smuzhiyun * @tx_info:
584*4882a593Smuzhiyun * Average TRB lengths for the endpoint ring and
585*4882a593Smuzhiyun * max payload within an Endpoint Service Interval Time (ESIT).
586*4882a593Smuzhiyun *
587*4882a593Smuzhiyun * Endpoint Context - section 6.2.1.2.This assumes the HC uses 32-byte context
588*4882a593Smuzhiyun * structures.If the HC uses 64-byte contexts, there is an additional 32 bytes
589*4882a593Smuzhiyun * reserved at the end of the endpoint context for HC internal use.
590*4882a593Smuzhiyun */
struct xhci_ep_ctx {
	__le32	ep_info;	/* ep state, streams, mult, interval */
	__le32	ep_info2;	/* ep type, max packet/burst size, error count */
	__le64	deq;		/* 64-bit ring dequeue pointer */
	__le32	tx_info;	/* average TRB length, max ESIT payload */
	/* offset 0x14 - 0x1f reserved for HC internal use */
	__le32	reserved[3];
};
599*4882a593Smuzhiyun
/* ep_info bitmasks */
/*
 * Endpoint State - bits 0:2
 * 0 - disabled
 * 1 - running
 * 2 - halted due to halt condition - ok to manipulate endpoint ring
 * 3 - stopped
 * 4 - TRB error
 * 5-7 - reserved
 */
/*
 * The state occupies only bits 0:2 (bit 3 is reserved), so the mask is 0x7,
 * not 0xf; this matches the comment above and the Linux xhci.h definition.
 */
#define EP_STATE_MASK		(0x7)
#define EP_STATE_DISABLED	0
#define EP_STATE_RUNNING	1
#define EP_STATE_HALTED		2
#define EP_STATE_STOPPED	3
#define EP_STATE_ERROR		4
/* Mult - Max number of bursts within an interval, in EP companion desc. */
#define EP_MULT(p)		(((p) & 0x3) << 8)
#define CTX_TO_EP_MULT(p)	(((p) >> 8) & 0x3)
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p)			(((p) & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p)	(1 << (((p) >> 16) & 0xff))
#define CTX_TO_EP_INTERVAL(p)		(((p) >> 16) & 0xff)
#define EP_MAXPSTREAMS_MASK		(0x1f << 10)
#define EP_MAXPSTREAMS(p)		(((p) << 10) & EP_MAXPSTREAMS_MASK)
/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
#define EP_HAS_LSA		(1 << 15)
629*4882a593Smuzhiyun
/* ep_info2 bitmasks */
/*
 * Force Event - generate transfer events for all TRBs for this endpoint
 * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
 */
#define FORCE_EVENT	(0x1)
#define ERROR_COUNT(p)	(((p) & 0x3) << 1)
#define ERROR_COUNT_SHIFT	(1)
#define ERROR_COUNT_MASK	(0x3)
#define CTX_TO_EP_TYPE(p)	(((p) >> 3) & 0x7)
#define EP_TYPE(p)	((p) << 3)
#define EP_TYPE_SHIFT	(3)
/* Endpoint Type values - bits 3:5 of ep_info2 */
#define ISOC_OUT_EP	1
#define BULK_OUT_EP	2
#define INT_OUT_EP	3
#define CTRL_EP	4
#define ISOC_IN_EP	5
#define BULK_IN_EP	6
#define INT_IN_EP	7
/* bit 6 reserved */
/* bit 7 is Host Initiate Disable - for disabling stream selection */
#define MAX_BURST(p)	(((p)&0xff) << 8)
#define MAX_BURST_MASK	(0xff)
#define MAX_BURST_SHIFT	(8)
#define CTX_TO_MAX_BURST(p)	(((p) >> 8) & 0xff)
#define MAX_PACKET(p)	(((p)&0xffff) << 16)
#define MAX_PACKET_MASK	(0xffff)
#define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)
#define MAX_PACKET_SHIFT	(16)

/* Get max packet size from ep desc. Bit 10..0 specify the max packet size.
 * USB2.0 spec 9.6.6.
 */
#define GET_MAX_PACKET(p)	((p) & 0x7ff)

/* tx_info bitmasks */
#define EP_AVG_TRB_LENGTH(p)		((p) & 0xffff)
#define EP_MAX_ESIT_PAYLOAD_LO(p)	(((p) & 0xffff) << 16)
#define EP_MAX_ESIT_PAYLOAD_HI(p)	((((p) >> 16) & 0xff) << 24)
#define CTX_TO_MAX_ESIT_PAYLOAD(p)	(((p) >> 16) & 0xffff)

/* deq bitmasks */
/* Dequeue Cycle State - bit 0 of the deq pointer */
#define EP_CTX_CYCLE_MASK		(1 << 0)
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun
/**
 * struct xhci_input_control_ctx
 * Input control context; see section 6.2.5.
 *
 * @drop_flags:	set the bit of the endpoint context you want to disable
 * @add_flags:	set the bit of the endpoint context you want to enable
 */
struct xhci_input_control_ctx {
	volatile __le32	drop_flags;
	volatile __le32	add_flags;
	__le32	rsvd2[6];
};
687*4882a593Smuzhiyun
688*4882a593Smuzhiyun
/**
 * struct xhci_device_context_array
 * @dev_context_ptrs:	array of 64-bit DMA addresses for device contexts,
 *			one slot per possible device (MAX_HC_SLOTS)
 */
struct xhci_device_context_array {
	/* 64-bit device addresses; we only write 32-bit addresses */
	__le64	dev_context_ptrs[MAX_HC_SLOTS];
};
697*4882a593Smuzhiyun /* TODO: write function to set the 64-bit device DMA address */
698*4882a593Smuzhiyun /*
699*4882a593Smuzhiyun * TODO: change this to be dynamically sized at HC mem init time since the HC
700*4882a593Smuzhiyun * might not be able to handle the maximum number of devices possible.
701*4882a593Smuzhiyun */
702*4882a593Smuzhiyun
703*4882a593Smuzhiyun
/* Transfer Event TRB */
struct xhci_transfer_event {
	/* 64-bit buffer address, or immediate data */
	__le64	buffer;
	/* bits 0:23 are the transfer length - see EVENT_TRB_LEN below */
	__le32	transfer_len;
	/* This field is interpreted differently based on the type of TRB */
	volatile __le32	flags;
};
711*4882a593Smuzhiyun
/* Transfer event TRB length bit mask */
/* bits 0:23 */
#define EVENT_TRB_LEN(p)	((p) & 0xffffff)

/** Transfer Event bit fields **/
#define TRB_TO_EP_ID(p)		(((p) >> 16) & 0x1f)

/* Completion Code - only applicable for some types of TRBs */
/* bits 24:31 */
#define COMP_CODE_MASK		(0xff << 24)
#define COMP_CODE_SHIFT		(24)
#define GET_COMP_CODE(p)	(((p) & COMP_CODE_MASK) >> 24)
723*4882a593Smuzhiyun
typedef enum {
	COMP_SUCCESS = 1,
	/* Data Buffer Error */
	COMP_DB_ERR, /* 2 */
	/* Babble Detected Error */
	COMP_BABBLE, /* 3 */
	/* USB Transaction Error */
	COMP_TX_ERR, /* 4 */
	/* TRB Error - some TRB field is invalid */
	COMP_TRB_ERR, /* 5 */
	/* Stall Error - USB device is stalled */
	COMP_STALL, /* 6 */
	/* Resource Error - HC doesn't have memory for that device configuration */
	COMP_ENOMEM, /* 7 */
	/* Bandwidth Error - not enough room in schedule for this dev config */
	COMP_BW_ERR, /* 8 */
	/* No Slots Available Error - HC ran out of device slots */
	COMP_ENOSLOTS, /* 9 */
	/* Invalid Stream Type Error */
	COMP_STREAM_ERR, /* 10 */
	/* Slot Not Enabled Error - doorbell rung for disabled device slot */
	COMP_EBADSLT, /* 11 */
	/* Endpoint Not Enabled Error */
	COMP_EBADEP,/* 12 */
	/* Short Packet */
	COMP_SHORT_TX, /* 13 */
	/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
	COMP_UNDERRUN, /* 14 */
	/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
	COMP_OVERRUN, /* 15 */
	/* Virtual Function Event Ring Full Error */
	COMP_VF_FULL, /* 16 */
	/* Parameter Error - Context parameter is invalid */
	COMP_EINVAL, /* 17 */
	/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
	COMP_BW_OVER,/* 18 */
	/* Context State Error - illegal context state transition requested */
	COMP_CTX_STATE,/* 19 */
	/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
	COMP_PING_ERR,/* 20 */
	/* Event Ring is full */
	COMP_ER_FULL,/* 21 */
	/* Incompatible Device Error */
	COMP_DEV_ERR,/* 22 */
	/* Missed Service Error - HC couldn't service an isoc ep within interval */
	COMP_MISSED_INT,/* 23 */
	/* Successfully stopped command ring */
	COMP_CMD_STOP, /* 24 */
	/* Successfully aborted current command and stopped command ring */
	COMP_CMD_ABORT, /* 25 */
	/* Stopped - transfer was terminated by a stop endpoint command */
	COMP_STOP,/* 26 */
	/* Same as COMP_STOP, but the transferred length in the event
	 * is invalid */
	COMP_STOP_INVAL, /* 27*/
	/* Control Abort Error - Debug Capability - control pipe aborted */
	COMP_DBG_ABORT, /* 28 */
	/* Max Exit Latency Too Large Error */
	COMP_MEL_ERR,/* 29 */
	/* TRB type 30 reserved */
	/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
	COMP_BUFF_OVER = 31,
	/* Event Lost Error - xHC has an "internal event overrun condition" */
	COMP_ISSUES, /* 32 */
	/* Undefined Error - reported when other error codes don't apply */
	COMP_UNKNOWN, /* 33 */
	/* Invalid Stream ID Error */
	COMP_STRID_ERR, /* 34 */
	/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
	COMP_2ND_BW_ERR, /* 35 */
	/* Split Transaction Error */
	COMP_SPLIT_ERR /* 36 */

} xhci_comp_code;
798*4882a593Smuzhiyun
/* Link TRB - carries a 64-bit pointer to another ring segment */
struct xhci_link_trb {
	/* 64-bit segment pointer*/
	volatile __le64 segment_ptr;
	volatile __le32 intr_target;
	volatile __le32 control;
};

/* control bitfields */
/* Toggle Cycle bit */
#define LINK_TOGGLE	(0x1 << 1)
808*4882a593Smuzhiyun
809*4882a593Smuzhiyun /* Command completion event TRB */
struct xhci_event_cmd {
	/* Pointer to command TRB, or the value passed by the event data trb */
	volatile __le64 cmd_trb;
	volatile __le32 status;
	/* slot ID and TRB type fields - see the flags bitmasks below */
	volatile __le32 flags;
};
816*4882a593Smuzhiyun
/* flags bitmasks */
/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */
#define TRB_TO_SLOT_ID(p)	(((p) & (0xff << 24)) >> 24)
#define TRB_TO_SLOT_ID_SHIFT	(24)
#define TRB_TO_SLOT_ID_MASK	(0xff << TRB_TO_SLOT_ID_SHIFT)
#define SLOT_ID_FOR_TRB(p)	(((p) & 0xff) << 24)
#define SLOT_ID_FOR_TRB_MASK	(0xff)
#define SLOT_ID_FOR_TRB_SHIFT	(24)

/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
#define TRB_TO_EP_INDEX(p)	((((p) & (0x1f << 16)) >> 16) - 1)
#define EP_ID_FOR_TRB(p)	((((p) + 1) & 0x1f) << 16)

#define SUSPEND_PORT_FOR_TRB(p)	(((p) & 1) << 23)
#define TRB_TO_SUSPEND_PORT(p)	(((p) & (1 << 23)) >> 23)
#define LAST_EP_INDEX		30

/* Set TR Dequeue Pointer command TRB fields */
#define TRB_TO_STREAM_ID(p)	((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p)	((((p)) & 0xffff) << 16)


/* Port Status Change Event TRB fields */
/* Port ID - bits 31:24 */
#define GET_PORT_ID(p)		(((p) & (0xff << 24)) >> 24)
#define PORT_ID_SHIFT		(24)
#define PORT_ID_MASK		(0xff << PORT_ID_SHIFT)

/* Normal TRB fields */
/* transfer_len bitmasks - bits 0:16 */
#define TRB_LEN(p)		((p) & 0x1ffff)
#define TRB_LEN_MASK		(0x1ffff)
/* Interrupter Target - which MSI-X vector to target the completion event at */
#define TRB_INTR_TARGET_SHIFT	(22)
#define TRB_INTR_TARGET_MASK	(0x3ff)
#define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
#define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
/* Transfer Burst Count - bits 7:8 */
#define TRB_TBC(p)		(((p) & 0x3) << 7)
/* Transfer Last Burst Packet Count - bits 16:19 */
#define TRB_TLBPC(p)		(((p) & 0xf) << 16)

/* Cycle bit - indicates TRB ownership by HC or HCD */
#define TRB_CYCLE		(1<<0)
/*
 * Force next event data TRB to be evaluated before task switch.
 * Used to pass OS data back after a TD completes.
 */
#define TRB_ENT			(1<<1)
/* Interrupt on short packet */
#define TRB_ISP			(1<<2)
/* Set PCIe no snoop attribute */
#define TRB_NO_SNOOP		(1<<3)
/* Chain multiple TRBs into a TD */
#define TRB_CHAIN		(1<<4)
/* Interrupt on completion */
#define TRB_IOC			(1<<5)
/* The buffer pointer contains immediate data */
#define TRB_IDT			(1<<6)

/* Block Event Interrupt */
#define TRB_BEI			(1<<9)

/* Control transfer TRB specific fields */
#define TRB_DIR_IN		(1<<16)
#define TRB_TX_TYPE(p)		((p) << 16)
#define TRB_TX_TYPE_SHIFT	(16)
#define TRB_DATA_OUT		2
#define TRB_DATA_IN		3

/* Isochronous TRB specific fields */
/* SIA - Start Isoch ASAP */
#define TRB_SIA			(1 << 31)
888*4882a593Smuzhiyun
/* Generic TRB - four dwords, interpreted according to the TRB type field */
struct xhci_generic_trb {
	volatile __le32 field[4];
};
892*4882a593Smuzhiyun
/* All TRB variants are 16 bytes and share this layout */
union xhci_trb {
	struct xhci_link_trb		link;
	struct xhci_transfer_event	trans_event;
	struct xhci_event_cmd		event_cmd;
	struct xhci_generic_trb		generic;
};
899*4882a593Smuzhiyun
/* TRB bit mask - TRB Type occupies bits 10:15 */
#define TRB_TYPE_BITMASK	(0xfc00)
#define TRB_TYPE(p)		((p) << 10)
#define TRB_TYPE_SHIFT		(10)
#define TRB_FIELD_TO_TYPE(p)	(((p) & TRB_TYPE_BITMASK) >> 10)
905*4882a593Smuzhiyun
906*4882a593Smuzhiyun /* TRB type IDs */
/* Encodings for the TRB Type field - see TRB_TYPE()/TRB_FIELD_TO_TYPE above */
typedef enum {
	/* bulk, interrupt, isoc scatter/gather, and control data stage */
	TRB_NORMAL = 1,
	/* setup stage for control transfers */
	TRB_SETUP, /* 2 */
	/* data stage for control transfers */
	TRB_DATA, /* 3 */
	/* status stage for control transfers */
	TRB_STATUS, /* 4 */
	/* isoc transfers */
	TRB_ISOC, /* 5 */
	/* TRB for linking ring segments */
	TRB_LINK, /* 6 */
	/* TRB for EVENT DATA */
	TRB_EVENT_DATA, /* 7 */
	/* Transfer Ring No-op (not for the command ring) */
	TRB_TR_NOOP, /* 8 */
	/* Command TRBs */
	/* Enable Slot Command */
	TRB_ENABLE_SLOT, /* 9 */
	/* Disable Slot Command */
	TRB_DISABLE_SLOT, /* 10 */
	/* Address Device Command */
	TRB_ADDR_DEV, /* 11 */
	/* Configure Endpoint Command */
	TRB_CONFIG_EP, /* 12 */
	/* Evaluate Context Command */
	TRB_EVAL_CONTEXT, /* 13 */
	/* Reset Endpoint Command */
	TRB_RESET_EP, /* 14 */
	/* Stop Transfer Ring Command */
	TRB_STOP_RING, /* 15 */
	/* Set Transfer Ring Dequeue Pointer Command */
	TRB_SET_DEQ, /* 16 */
	/* Reset Device Command */
	TRB_RESET_DEV, /* 17 */
	/* Force Event Command (opt) */
	TRB_FORCE_EVENT, /* 18 */
	/* Negotiate Bandwidth Command (opt) */
	TRB_NEG_BANDWIDTH, /* 19 */
	/* Set Latency Tolerance Value Command (opt) */
	TRB_SET_LT, /* 20 */
	/* Get port bandwidth Command */
	TRB_GET_BW, /* 21 */
	/* Force Header Command - generate a transaction or link management packet */
	TRB_FORCE_HEADER, /* 22 */
	/* No-op Command - not for transfer rings */
	TRB_CMD_NOOP, /* 23 */
	/* TRB IDs 24-31 reserved */
	/* Event TRBS */
	/* Transfer Event */
	TRB_TRANSFER = 32,
	/* Command Completion Event */
	TRB_COMPLETION, /* 33 */
	/* Port Status Change Event */
	TRB_PORT_STATUS, /* 34 */
	/* Bandwidth Request Event (opt) */
	TRB_BANDWIDTH_EVENT, /* 35 */
	/* Doorbell Event (opt) */
	TRB_DOORBELL, /* 36 */
	/* Host Controller Event */
	TRB_HC_EVENT, /* 37 */
	/* Device Notification Event - device sent function wake notification */
	TRB_DEV_NOTE, /* 38 */
	/* MFINDEX Wrap Event - microframe counter wrapped */
	TRB_MFINDEX_WRAP, /* 39 */
	/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
	/* Nec vendor-specific command completion event. */
	TRB_NEC_CMD_COMP = 48, /* 48 */
	/* Get NEC firmware revision. */
	TRB_NEC_GET_FW, /* 49 */
} trb_type;
979*4882a593Smuzhiyun
#define TRB_TYPE_LINK(x)	(((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
/* Above, but for __le32 types -- can avoid work by swapping constants: */
#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_LINK)))
#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))

/*
 * TRBS_PER_SEGMENT must be a multiple of 4,
 * since the command ring is 64-byte aligned.
 * It must also be greater than 16.
 */
#define TRBS_PER_SEGMENT	64
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
/* Each TRB is 16 bytes */
#define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
/* SEGMENT_SHIFT should be log2(SEGMENT_SIZE).
 * Change this if you change TRBS_PER_SEGMENT!
 */
#define SEGMENT_SHIFT		10
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT	16
#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
1003*4882a593Smuzhiyun
/* One ring segment: a contiguous array of TRBs */
struct xhci_segment {
	union xhci_trb		*trbs;	/* TRB array for this segment */
	/* private to HCD */
	struct xhci_segment	*next;	/* next segment in the ring */
};
1009*4882a593Smuzhiyun
/* A transfer, command, or event ring made of linked segments of TRBs */
struct xhci_ring {
	struct xhci_segment	*first_seg;
	union xhci_trb		*enqueue;	/* producer position */
	struct xhci_segment	*enq_seg;	/* segment holding enqueue */
	union xhci_trb		*dequeue;	/* consumer position */
	struct xhci_segment	*deq_seg;	/* segment holding dequeue */
	/*
	 * Write the cycle state into the TRB cycle field to give ownership of
	 * the TRB to the host controller (if we are the producer), or to check
	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
	 */
	volatile u32		cycle_state;
	unsigned int		num_segs;	/* number of segments in the ring */
};
1024*4882a593Smuzhiyun
/* Event Ring Segment Table entry */
struct xhci_erst_entry {
	/* 64-bit event ring segment address */
	__le64	seg_addr;
	__le32	seg_size;
	/* Set to zero */
	__le32	rsvd;
};
1032*4882a593Smuzhiyun
/* Event Ring Segment Table bookkeeping */
struct xhci_erst {
	struct xhci_erst_entry	*entries;	/* the segment table itself */
	unsigned int		num_entries;	/* entries currently in use */
	/* Num entries the ERST can contain */
	unsigned int		erst_size;
};
1039*4882a593Smuzhiyun
/* Scratchpad: array of 64-bit buffer pointers */
struct xhci_scratchpad {
	u64 *sp_array;
};
1043*4882a593Smuzhiyun
/*
 * Each segment table entry is 4*32bits long. 1K seems like an ok size:
 * (1K bytes * 8bits/byte) / (4*32 bits) = 64 segment entries in the table,
 * meaning 64 ring segments.
 */
/* Initial number of event ring segments allocated */
#define ERST_NUM_SEGS	1
/* Initial number of event segment rings allocated */
#define ERST_ENTRIES	1
/* Initial allocated size of the ERST, in number of entries */
#define ERST_SIZE	64
/* Poll every 60 seconds */
#define POLL_TIMEOUT	60
/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
#define XHCI_STOP_EP_CMD_TIMEOUT	5
/* XXX: Make these module parameters */
1059*4882a593Smuzhiyun
/* Host-side (driver) state for one endpoint */
struct xhci_virt_ep {
	struct xhci_ring	*ring;		/* transfer ring for this ep */
	unsigned int		ep_state;	/* bitmask of the flags below */
#define SET_DEQ_PENDING		(1 << 0)
#define EP_HALTED		(1 << 1)	/* For stall handling */
#define EP_HALT_PENDING		(1 << 2)	/* For URB cancellation */
/* Transitioning the endpoint to using streams, don't enqueue URBs */
#define EP_GETTING_STREAMS	(1 << 3)
#define EP_HAS_STREAMS		(1 << 4)
/* Transitioning the endpoint to not using streams, don't enqueue URBs */
#define EP_GETTING_NO_STREAMS	(1 << 5)
};
1072*4882a593Smuzhiyun
/* Context entry size in bytes: 64 if the HC reports 64-byte contexts, else 32 */
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

/* Host-side (driver) state for one attached USB device */
struct xhci_virt_device {
	struct usb_device		*udev;
	/*
	 * Commands to the hardware are passed an "input context" that
	 * tells the hardware what to change in its data structures.
	 * The hardware will return changes in an "output context" that
	 * software must allocate for the hardware. We need to keep
	 * track of input and output contexts separately because
	 * these commands might fail and we don't trust the hardware.
	 */
	struct xhci_container_ctx	*out_ctx;
	/* Used for addressing devices and configuration changes */
	struct xhci_container_ctx	*in_ctx;
	/* Rings saved to ensure old alt settings can be re-instated */
#define	XHCI_MAX_RINGS_CACHED	31
	/* One entry per endpoint context (MAX_EP_CTX_NUM == 31) */
	struct xhci_virt_ep		eps[31];
};
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun /* TODO: copied from ehci.h - can be refactored? */
1094*4882a593Smuzhiyun /* xHCI spec says all registers are little endian */
/* Read a 32-bit xHCI register; registers are little endian per the spec */
static inline unsigned int xhci_readl(uint32_t volatile *regs)
{
	return readl(regs);
}
1099*4882a593Smuzhiyun
/* Write a 32-bit xHCI register; registers are little endian per the spec */
static inline void xhci_writel(uint32_t volatile *regs, const unsigned int val)
{
	writel(val, regs);
}
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun /*
1106*4882a593Smuzhiyun * Registers should always be accessed with double word or quad word accesses.
1107*4882a593Smuzhiyun * Some xHCI implementations may support 64-bit address pointers. Registers
1108*4882a593Smuzhiyun * with 64-bit address pointers should be written to with dword accesses by
1109*4882a593Smuzhiyun * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
1110*4882a593Smuzhiyun * xHCI implementations that do not support 64-bit address pointers will ignore
1111*4882a593Smuzhiyun * the high dword, and write order is irrelevant.
1112*4882a593Smuzhiyun */
xhci_readq(__le64 volatile * regs)1113*4882a593Smuzhiyun static inline u64 xhci_readq(__le64 volatile *regs)
1114*4882a593Smuzhiyun {
1115*4882a593Smuzhiyun #if BITS_PER_LONG == 64
1116*4882a593Smuzhiyun return readq(regs);
1117*4882a593Smuzhiyun #else
1118*4882a593Smuzhiyun __u32 *ptr = (__u32 *)regs;
1119*4882a593Smuzhiyun u64 val_lo = readl(ptr);
1120*4882a593Smuzhiyun u64 val_hi = readl(ptr + 1);
1121*4882a593Smuzhiyun return val_lo + (val_hi << 32);
1122*4882a593Smuzhiyun #endif
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun
/*
 * Write a 64-bit xHCI register. On 32-bit builds this is done as two dword
 * writes: low dword first, then high dword, as the xHCI spec requires for
 * implementations that support 64-bit pointers (see the note above).
 */
static inline void xhci_writeq(__le64 volatile *regs, const u64 val)
{
#if BITS_PER_LONG == 64
	writeq(val, regs);
#else
	__u32 *dwords = (__u32 *)regs;

	writel(lower_32_bits(val), dwords);
	writel(upper_32_bits(val), dwords + 1);
#endif
}
1138*4882a593Smuzhiyun
/*
 * Set up xHCI controller 'index' and return its capability (hccr) and
 * operational (hcor) register blocks through the out-parameters.
 */
int xhci_hcd_init(int index, struct xhci_hccr **ret_hccr,
					struct xhci_hcor **ret_hcor);
/* Shut down xHCI controller 'index' */
void xhci_hcd_stop(int index);
1142*4882a593Smuzhiyun
1143*4882a593Smuzhiyun
/*************************************************************
	EXTENDED CAPABILITY DEFINITIONS
*************************************************************/
/*
 * Note: XHCI_MAX_HALT_USEC is defined once near the top of this header;
 * the identical duplicate definition that used to live here was removed.
 */
/* HC not running - set to 1 when run/stop bit is cleared. */
#define XHCI_STS_HALT			(1 << 0)

/* HCCPARAMS offset from PCI base address */
#define XHCI_HCC_PARAMS_OFFSET		0x10
/* HCCPARAMS contains the first extended capability pointer */
#define XHCI_HCC_EXT_CAPS(p)		(((p) >> 16) & 0xffff)

/* Command and Status registers offset from the Operational Registers address */
#define XHCI_CMD_OFFSET			0x00
#define XHCI_STS_OFFSET			0x04

/* Limit used when walking the extended capability list */
#define XHCI_MAX_EXT_CAPS		50

/* Capability Register */
/* bits 7:0 - how long is the Capabilities register */
/* (was "(p) >> 00": accidental octal literal, value unchanged) */
#define XHCI_HC_LENGTH(p)		(((p) >> 0) & 0x00ff)
1166*4882a593Smuzhiyun
/* Extended capability register fields */
/* bits 7:0 - capability ID */
#define XHCI_EXT_CAPS_ID(p) (((p) >> 0) & 0xff)
/* bits 15:8 - pointer to the next capability (0 terminates the list) */
#define XHCI_EXT_CAPS_NEXT(p) (((p) >> 8) & 0xff)
/* bits 31:16 - capability-specific value */
#define XHCI_EXT_CAPS_VAL(p) ((p) >> 16)
/* Extended capability IDs - ID 0 reserved */
#define XHCI_EXT_CAPS_LEGACY 1
#define XHCI_EXT_CAPS_PROTOCOL 2
#define XHCI_EXT_CAPS_PM 3
#define XHCI_EXT_CAPS_VIRT 4
#define XHCI_EXT_CAPS_ROUTE 5
/* IDs 6-9 reserved */
#define XHCI_EXT_CAPS_DEBUG 10
/* USB Legacy Support Capability - section 7.1.1 */
/* BIOS/OS ownership handshake bits in the USBLEGSUP register */
#define XHCI_HC_BIOS_OWNED (1 << 16)
#define XHCI_HC_OS_OWNED (1 << 24)

/* USB Legacy Support Capability register - section 7.1.1 */
/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
#define XHCI_LEGACY_SUPPORT_OFFSET (0x00)

/* USB Legacy Support Control and Status Register - section 7.1.2 */
/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
#define XHCI_LEGACY_CONTROL_OFFSET (0x04)
/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))

/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */
#define XHCI_L1C (1 << 16)

/* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
#define XHCI_HLC (1 << 19)

/* command register values to disable interrupts and halt the HC */
/* start/stop HC execution - do not write unless HC is halted */
#define XHCI_CMD_RUN (1 << 0)
/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
#define XHCI_CMD_EIE (1 << 2)
/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
#define XHCI_CMD_HSEIE (1 << 3)
/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
#define XHCI_CMD_EWE (1 << 10)

/* All interrupt-enable bits in USBCMD, as one mask */
#define XHCI_IRQS (XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)

/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
#define XHCI_STS_CNR (1 << 11)
1213*4882a593Smuzhiyun
/**
 * struct xhci_ctrl - per-controller state for the xHCI driver
 *
 * Groups the register-block pointers with the DMA data structures
 * (device context array, rings, event ring segment table) that belong
 * to one host controller instance.
 */
struct xhci_ctrl {
#if CONFIG_IS_ENABLED(DM_USB)
	struct udevice *dev;	/* driver-model device for this controller */
#endif
	struct xhci_hccr *hccr;	/* R/O registers, no need for volatile */
	struct xhci_hcor *hcor;	/* operational registers */
	struct xhci_doorbell_array *dba;	/* doorbell register array */
	struct xhci_run_regs *run_regs;	/* runtime registers */
	/* device context base address array; DMA-aligned for the HC */
	struct xhci_device_context_array *dcbaa \
		__attribute__ ((aligned(ARCH_DMA_MINALIGN)));
	struct xhci_ring *event_ring;	/* HC -> driver event TRBs */
	struct xhci_ring *cmd_ring;	/* driver -> HC command TRBs */
	struct xhci_ring *transfer_ring;
	struct xhci_segment *seg;
	struct xhci_intr_reg *ir_set;	/* interrupter register set in use */
	struct xhci_erst erst;		/* event ring segment table */
	struct xhci_erst_entry entry[ERST_NUM_SEGS];
	struct xhci_scratchpad *scratchpad;
	struct xhci_virt_device *devs[MAX_HC_SLOTS];	/* per-slot state */
	struct usb_hub_descriptor hub;	/* NOTE(review): presumably the root hub descriptor - confirm */
	int rootdev;
};
1236*4882a593Smuzhiyun
/* Address of @trb within @seg (presumably the bus/DMA address - see .c) */
unsigned long trb_addr(struct xhci_segment *seg, union xhci_trb *trb);
/* Accessors pulling typed sub-contexts out of a container context */
struct xhci_input_control_ctx
	*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx);
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
					struct xhci_container_ctx *ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index);
/* Copy the endpoint context @ep_index from @out_ctx into @in_ctx */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index);
/* Copy the slot context from @out_ctx into @in_ctx */
void xhci_slot_copy(struct xhci_ctrl *ctrl,
		    struct xhci_container_ctx *in_ctx,
		    struct xhci_container_ctx *out_ctx);
/* Initialise @udev's contexts so the device can be addressed */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl,
				     struct usb_device *udev, int hop_portnr);
/* Queue a command TRB of type @cmd on the command ring */
void xhci_queue_command(struct xhci_ctrl *ctrl, u8 *ptr,
			u32 slot_id, u32 ep_index, trb_type cmd);
/* Advance the event ring dequeue pointer past the current event */
void xhci_acknowledge_event(struct xhci_ctrl *ctrl);
/* Block until an event of type @expected arrives; returns its TRB */
union xhci_trb *xhci_wait_for_event(struct xhci_ctrl *ctrl, trb_type expected);
/* Submit a bulk transfer; returns 0 on success, -ve on error */
int xhci_bulk_tx(struct usb_device *udev, unsigned long pipe,
		 int length, void *buffer);
/* Submit a control transfer described by @req */
int xhci_ctrl_tx(struct usb_device *udev, unsigned long pipe,
		 struct devrequest *req, int length, void *buffer);
/* Re-evaluate ep0 max packet size for @udev */
int xhci_check_maxpacket(struct usb_device *udev);
/* Dcache maintenance over [addr, addr + type_len) for DMA coherency */
void xhci_flush_cache(uintptr_t addr, u32 type_len);
void xhci_inval_cache(uintptr_t addr, u32 type_len);
/* Free all memory owned by @ctrl */
void xhci_cleanup(struct xhci_ctrl *ctrl);
/* Allocate a ring of @num_segs segments; link TRBs added if @link_trbs */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs);
/* Allocate per-slot device state for @slot_id */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id);
/* Allocate and wire up all controller data structures */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
		  struct xhci_hcor *hcor);
1270*4882a593Smuzhiyun
/**
 * xhci_deregister() - Unregister an XHCI controller
 *
 * @dev: Controller device
 * @return 0 if deregistered, -ve on error
 */
int xhci_deregister(struct udevice *dev);

/**
 * xhci_register() - Register a new XHCI controller
 *
 * @dev: Controller device
 * @hccr: Host controller control registers
 * @hcor: Host controller operational registers
 * @return 0 if registered, -ve on error
 */
int xhci_register(struct udevice *dev, struct xhci_hccr *hccr,
		  struct xhci_hcor *hcor);

/* Driver-model USB operations implemented by the xHCI core */
extern struct dm_usb_ops xhci_usb_ops;

/* Look up the xhci_ctrl that @udev hangs off */
struct xhci_ctrl *xhci_get_ctrl(struct usb_device *udev);
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun #endif /* HOST_XHCI_H_ */
1295