/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_FOTG210_H
#define __LINUX_FOTG210_H

#include <linux/usb/ehci-dbgp.h>

/* definitions used for the EHCI driver */

/*
 * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
 * __leXX (normally) or __beXX (given FOTG210_BIG_ENDIAN_DESC), depending on
 * the host controller implementation.
 *
 * To facilitate the strongest possible byte-order checking from "sparse"
 * and so on, we use __leXX unless that's not practical.
 */
#define __hc32	__le32
#define __hc16	__le16

/* statistics can be kept for tuning/monitoring */
struct fotg210_stats {
	/* irq usage */
	unsigned long		normal;
	unsigned long		error;
	unsigned long		iaa;
	unsigned long		lost_iaa;

	/* termination of urbs from core */
	unsigned long		complete;
	unsigned long		unlink;
};
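/*
 * Illustrative note (not part of the original header): when FOTG210_STATS
 * is defined, the IRQ path is expected to bump these counters through the
 * INCR() helper declared further down, e.g. INCR(fotg210->stats.normal)
 * on a "normal" completion interrupt and INCR(fotg210->stats.lost_iaa)
 * when the IAA watchdog fires.
 */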

/* fotg210_hcd->lock guards shared data against other CPUs:
 *   fotg210_hcd:	async, unlink, periodic (and shadow), ...
 *   usb_host_endpoint: hcpriv
 *   fotg210_qh:	qh_next, qtd_list
 *   fotg210_qtd:	qtd_list
 *
 * Also, hold this lock when talking to HC registers or
 * when updating hw_* fields in shared qh/qtd/... structures.
 */
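/*
 * A minimal sketch of the locking rule above (illustrative, not part of
 * the original header); code touching the schedules or HC registers is
 * expected to do so under fotg210->lock with interrupts disabled:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&fotg210->lock, flags);
 *	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
 *	spin_unlock_irqrestore(&fotg210->lock, flags);
 */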

#define	FOTG210_MAX_ROOT_PORTS	1		/* see HCS_N_PORTS */

/*
 * fotg210_rh_state values of FOTG210_RH_RUNNING or above mean that the
 * controller may be doing DMA.  Lower values mean there's no DMA.
 */
enum fotg210_rh_state {
	FOTG210_RH_HALTED,
	FOTG210_RH_SUSPENDED,
	FOTG210_RH_RUNNING,
	FOTG210_RH_STOPPING
};

/*
 * Timer events, ordered by increasing delay length.
 * Always update event_delays_ns[] and event_handlers[] (defined in
 * ehci-timer.c) in parallel with this list.
 */
enum fotg210_hrtimer_event {
	FOTG210_HRTIMER_POLL_ASS,	/* Poll for async schedule off */
	FOTG210_HRTIMER_POLL_PSS,	/* Poll for periodic schedule off */
	FOTG210_HRTIMER_POLL_DEAD,	/* Wait for dead controller to stop */
	FOTG210_HRTIMER_UNLINK_INTR,	/* Wait for interrupt QH unlink */
	FOTG210_HRTIMER_FREE_ITDS,	/* Wait for unused iTDs and siTDs */
	FOTG210_HRTIMER_ASYNC_UNLINKS,	/* Unlink empty async QHs */
	FOTG210_HRTIMER_IAA_WATCHDOG,	/* Handle lost IAA interrupts */
	FOTG210_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
	FOTG210_HRTIMER_DISABLE_ASYNC,	/* Wait to disable async sched */
	FOTG210_HRTIMER_IO_WATCHDOG,	/* Check for missing IRQs */
	FOTG210_HRTIMER_NUM_EVENTS	/* Must come last */
};
#define FOTG210_HRTIMER_NO_EVENT	99

struct fotg210_hcd {			/* one per controller */
	/* timing support */
	enum fotg210_hrtimer_event	next_hrtimer_event;
	unsigned		enabled_hrtimer_events;
	ktime_t			hr_timeouts[FOTG210_HRTIMER_NUM_EVENTS];
	struct hrtimer		hrtimer;

	int			PSS_poll_count;
	int			ASS_poll_count;
	int			died_poll_count;

	/* glue to PCI and HCD framework */
	struct fotg210_caps __iomem *caps;
	struct fotg210_regs __iomem *regs;
	struct ehci_dbg_port __iomem *debug;

	__u32			hcs_params;	/* cached register copy */
	spinlock_t		lock;
	enum fotg210_rh_state	rh_state;

	/* general schedule support */
	bool			scanning:1;
	bool			need_rescan:1;
	bool			intr_unlinking:1;
	bool			async_unlinking:1;
	bool			shutdown:1;
	struct fotg210_qh		*qh_scan_next;

	/* async schedule support */
	struct fotg210_qh		*async;
	struct fotg210_qh		*dummy;		/* For AMD quirk use */
	struct fotg210_qh		*async_unlink;
	struct fotg210_qh		*async_unlink_last;
	struct fotg210_qh		*async_iaa;
	unsigned		async_unlink_cycle;
	unsigned		async_count;	/* async activity count */

	/* periodic schedule support */
#define	DEFAULT_I_TDPS		1024		/* some HCs can do less */
	unsigned		periodic_size;
	__hc32			*periodic;	/* hw periodic table */
	dma_addr_t		periodic_dma;
	struct list_head	intr_qh_list;
	unsigned		i_thresh;	/* uframes HC might cache */

	union fotg210_shadow	*pshadow;	/* mirror hw periodic table */
	struct fotg210_qh		*intr_unlink;
	struct fotg210_qh		*intr_unlink_last;
	unsigned		intr_unlink_cycle;
	unsigned		now_frame;	/* frame from HC hardware */
	unsigned		next_frame;	/* scan periodic, start here */
	unsigned		intr_count;	/* intr activity count */
	unsigned		isoc_count;	/* isoc activity count */
	unsigned		periodic_count;	/* periodic activity count */
	/* max periodic time per uframe */
	unsigned		uframe_periodic_max;


	/* list of itds completed while now_frame was still active */
	struct list_head	cached_itd_list;
	struct fotg210_itd	*last_itd_to_free;

	/* per root hub port */
	unsigned long		reset_done[FOTG210_MAX_ROOT_PORTS];

	/* bit vectors (one bit per port)
	 * which ports were already suspended at the start of a bus suspend
	 */
	unsigned long		bus_suspended;

	/* which ports are dedicated to the companion controller */
	unsigned long		companion_ports;

	/* which ports are owned by the companion during a bus suspend */
	unsigned long		owned_ports;

	/* which ports have the change-suspend feature turned on */
	unsigned long		port_c_suspend;

	/* which ports are suspended */
	unsigned long		suspended_ports;

	/* which ports have started to resume */
	unsigned long		resuming_ports;

	/* per-HC memory pools (could be per-bus, but ...) */
	struct dma_pool		*qh_pool;	/* qh per active urb */
	struct dma_pool		*qtd_pool;	/* one or more per qh */
	struct dma_pool		*itd_pool;	/* itd per iso urb */

	unsigned		random_frame;
	unsigned long		next_statechange;
	ktime_t			last_periodic_enable;
	u32			command;

	/* SILICON QUIRKS */
	unsigned		need_io_watchdog:1;
	unsigned		fs_i_thresh:1;	/* Intel iso scheduling */

	u8			sbrn;		/* packed release number */

	/* irq statistics */
#ifdef FOTG210_STATS
	struct fotg210_stats	stats;
#	define INCR(x) ((x)++)
#else
#	define INCR(x) do {} while (0)
#endif

	/* silicon clock */
	struct clk		*pclk;

	/* debug files */
	struct dentry		*debug_dir;
};

/* convert between an HCD pointer and the corresponding FOTG210_HCD */
static inline struct fotg210_hcd *hcd_to_fotg210(struct usb_hcd *hcd)
{
	return (struct fotg210_hcd *)(hcd->hcd_priv);
}
static inline struct usb_hcd *fotg210_to_hcd(struct fotg210_hcd *fotg210)
{
	return container_of((void *) fotg210, struct usb_hcd, hcd_priv);
}
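/*
 * Usage sketch (illustrative, not part of the original header): HCD
 * callbacks receive a struct usb_hcd and convert it before touching
 * driver state, e.g. in an interrupt handler:
 *
 *	static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
 *	{
 *		struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
 *		...
 *		return IRQ_HANDLED;
 *	}
 */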

/*-------------------------------------------------------------------------*/

/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */

/* Section 2.2 Host Controller Capability Registers */
struct fotg210_caps {
	/* these fields are specified as 8 and 16 bit registers,
	 * but some hosts can't perform 8 or 16 bit PCI accesses.
	 * some hosts treat caplength and hciversion as parts of a 32-bit
	 * register, others treat them as two separate registers, this
	 * affects the memory map for big endian controllers.
	 */
	u32		hc_capbase;
#define HC_LENGTH(fotg210, p)	(0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
				(fotg210_big_endian_capbase(fotg210) ? 24 : 0)))
#define HC_VERSION(fotg210, p)	(0xffff&((p) >> /* bits 31:16 / offset 02h */ \
				(fotg210_big_endian_capbase(fotg210) ? 0 : 16)))
	u32		hcs_params;     /* HCSPARAMS - offset 0x4 */
#define HCS_N_PORTS(p)		(((p)>>0)&0xf)	/* bits 3:0, ports on HC */

	u32		hcc_params;	/* HCCPARAMS - offset 0x8 */
#define HCC_CANPARK(p)		((p)&(1 << 2))  /* true: can park on async qh */
#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1))  /* true: periodic_size changes*/
	u8		portroute[8];	 /* nibbles for routing - offset 0xC */
};
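/*
 * Illustrative sketch (not part of the original header): during setup the
 * operational register window is typically located by adding CAPLENGTH to
 * the capability base, and hcs_params is cached for later HCS_N_PORTS()
 * queries, roughly:
 *
 *	fotg210->caps = hcd->regs;
 *	fotg210->regs = hcd->regs +
 *		HC_LENGTH(fotg210, fotg210_readl(fotg210,
 *				&fotg210->caps->hc_capbase));
 *	fotg210->hcs_params = fotg210_readl(fotg210,
 *				&fotg210->caps->hcs_params);
 */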


/* Section 2.3 Host Controller Operational Registers */
struct fotg210_regs {

	/* USBCMD: offset 0x00 */
	u32		command;

/* EHCI 1.1 addendum */
/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
#define CMD_PARK	(1<<11)		/* enable "park" on async qh */
#define CMD_PARK_CNT(c)	(((c)>>8)&3)	/* how many transfers to park for */
#define CMD_IAAD	(1<<6)		/* "doorbell" interrupt async advance */
#define CMD_ASE		(1<<5)		/* async schedule enable */
#define CMD_PSE		(1<<4)		/* periodic schedule enable */
/* 3:2 is periodic frame list size */
#define CMD_RESET	(1<<1)		/* reset HC not bus */
#define CMD_RUN		(1<<0)		/* start/stop HC */
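/*
 * Illustrative sketch (not part of the original header): the driver keeps
 * a soft copy of USBCMD in fotg210->command; starting the controller and
 * later enabling the async schedule then looks roughly like:
 *
 *	fotg210->command |= CMD_RUN;
 *	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
 *	...
 *	fotg210->command |= CMD_ASE;
 *	fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
 */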

	/* USBSTS: offset 0x04 */
	u32		status;
#define STS_ASS		(1<<15)		/* Async Schedule Status */
#define STS_PSS		(1<<14)		/* Periodic Schedule Status */
#define STS_RECL	(1<<13)		/* Reclamation */
#define STS_HALT	(1<<12)		/* Not running (any reason) */
/* some bits reserved */
	/* these STS_* flags are also intr_enable bits (USBINTR) */
#define STS_IAA		(1<<5)		/* Interrupted on async advance */
#define STS_FATAL	(1<<4)		/* such as some PCI access errors */
#define STS_FLR		(1<<3)		/* frame list rolled over */
#define STS_PCD		(1<<2)		/* port change detect */
#define STS_ERR		(1<<1)		/* "error" completion (overflow, ...) */
#define STS_INT		(1<<0)		/* "normal" completion (short, ...) */

	/* USBINTR: offset 0x08 */
	u32		intr_enable;

	/* FRINDEX: offset 0x0C */
	u32		frame_index;	/* current microframe number */
	/* CTRLDSSEGMENT: offset 0x10 */
	u32		segment;	/* address bits 63:32 if needed */
	/* PERIODICLISTBASE: offset 0x14 */
	u32		frame_list;	/* points to periodic list */
	/* ASYNCLISTADDR: offset 0x18 */
	u32		async_next;	/* address of next async queue head */

	u32	reserved1;
	/* PORTSC: offset 0x20 */
	u32	port_status;
/* 31:23 reserved */
#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10))	/* USB 1.1 device */
#define PORT_RESET	(1<<8)		/* reset port */
#define PORT_SUSPEND	(1<<7)		/* suspend port */
#define PORT_RESUME	(1<<6)		/* resume it */
#define PORT_PEC	(1<<3)		/* port enable change */
#define PORT_PE		(1<<2)		/* port enable */
#define PORT_CSC	(1<<1)		/* connect status change */
#define PORT_CONNECT	(1<<0)		/* device connected */
#define PORT_RWC_BITS   (PORT_CSC | PORT_PEC)
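/*
 * Illustrative note (not part of the original header): PORT_CSC and
 * PORT_PEC are "write 1 to clear" bits, so a read-modify-write of PORTSC
 * should mask them out unless the intent is to acknowledge the change,
 * e.g.:
 *
 *	u32 temp = fotg210_readl(fotg210, &fotg210->regs->port_status);
 *
 *	temp &= ~PORT_RWC_BITS;
 *	fotg210_writel(fotg210, temp | PORT_RESUME,
 *		       &fotg210->regs->port_status);
 */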
	u32     reserved2[19];

	/* OTGCSR: offset 0x70 */
	u32     otgcsr;
#define OTGCSR_HOST_SPD_TYP     (3 << 22)
#define OTGCSR_A_BUS_DROP	(1 << 5)
#define OTGCSR_A_BUS_REQ	(1 << 4)

	/* OTGISR: offset 0x74 */
	u32     otgisr;
#define OTGISR_OVC	(1 << 10)

	u32     reserved3[15];

	/* GMIR: offset 0xB4 */
	u32     gmir;
#define GMIR_INT_POLARITY	(1 << 3) /*Active High*/
#define GMIR_MHC_INT		(1 << 2)
#define GMIR_MOTG_INT		(1 << 1)
#define GMIR_MDEV_INT	(1 << 0)
};

/*-------------------------------------------------------------------------*/

#define	QTD_NEXT(fotg210, dma)	cpu_to_hc32(fotg210, (u32)dma)

/*
 * EHCI Specification 0.95 Section 3.5
 * QTD: describe data transfer components (buffer, direction, ...)
 * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
 *
 * These are associated only with "QH" (Queue Head) structures,
 * used with control, bulk, and interrupt transfers.
 */
struct fotg210_qtd {
	/* first part defined by EHCI spec */
	__hc32			hw_next;	/* see EHCI 3.5.1 */
	__hc32			hw_alt_next;    /* see EHCI 3.5.2 */
	__hc32			hw_token;	/* see EHCI 3.5.3 */
#define	QTD_TOGGLE	(1 << 31)	/* data toggle */
#define	QTD_LENGTH(tok)	(((tok)>>16) & 0x7fff)
#define	QTD_IOC		(1 << 15)	/* interrupt on complete */
#define	QTD_CERR(tok)	(((tok)>>10) & 0x3)
#define	QTD_PID(tok)	(((tok)>>8) & 0x3)
#define	QTD_STS_ACTIVE	(1 << 7)	/* HC may execute this */
#define	QTD_STS_HALT	(1 << 6)	/* halted on error */
#define	QTD_STS_DBE	(1 << 5)	/* data buffer error (in HC) */
#define	QTD_STS_BABBLE	(1 << 4)	/* device was babbling (qtd halted) */
#define	QTD_STS_XACT	(1 << 3)	/* device gave illegal response */
#define	QTD_STS_MMF	(1 << 2)	/* incomplete split transaction */
#define	QTD_STS_STS	(1 << 1)	/* split transaction state */
#define	QTD_STS_PING	(1 << 0)	/* issue PING? */

#define ACTIVE_BIT(fotg210)	cpu_to_hc32(fotg210, QTD_STS_ACTIVE)
#define HALT_BIT(fotg210)		cpu_to_hc32(fotg210, QTD_STS_HALT)
#define STATUS_BIT(fotg210)	cpu_to_hc32(fotg210, QTD_STS_STS)

	__hc32			hw_buf[5];	/* see EHCI 3.5.4 */
	__hc32			hw_buf_hi[5];	/* Appendix B */

	/* the rest is HCD-private */
	dma_addr_t		qtd_dma;		/* qtd address */
	struct list_head	qtd_list;		/* sw qtd list */
	struct urb		*urb;			/* qtd's urb */
	size_t			length;			/* length of buffer */
} __aligned(32);

/* mask NakCnt+T in qh->hw_alt_next */
#define QTD_MASK(fotg210)	cpu_to_hc32(fotg210, ~0x1f)

#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
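/*
 * Illustrative sketch (not part of the original header): when scanning a
 * completed qtd, the token is converted to CPU order once and then decoded
 * with the helpers above; handle_error() and handle_short_read() below are
 * hypothetical stand-ins for the real completion paths:
 *
 *	u32 token = hc32_to_cpu(fotg210, qtd->hw_token);
 *
 *	if (token & QTD_STS_ACTIVE)
 *		return;			// HC still owns this qtd
 *	if ((token & QTD_STS_HALT) || QTD_CERR(token) == 0)
 *		handle_error();
 *	else if (IS_SHORT_READ(token))
 *		handle_short_read();
 */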

/*-------------------------------------------------------------------------*/

/* type tag from {qh,itd,fstn}->hw_next */
#define Q_NEXT_TYPE(fotg210, dma)	((dma) & cpu_to_hc32(fotg210, 3 << 1))

/*
 * Now the following defines are not converted using the
 * cpu_to_le32() macro anymore, since we have to support
 * "dynamic" switching between be and le support, so that the driver
 * can be used on one system with SoC EHCI controller using big-endian
 * descriptors as well as a normal little-endian PCI EHCI controller.
 */
/* values for that type tag */
#define Q_TYPE_ITD	(0 << 1)
#define Q_TYPE_QH	(1 << 1)
#define Q_TYPE_SITD	(2 << 1)
#define Q_TYPE_FSTN	(3 << 1)

/* next async queue entry, or pointer to interrupt/periodic QH */
#define QH_NEXT(fotg210, dma) \
	(cpu_to_hc32(fotg210, (((u32)dma)&~0x01f)|Q_TYPE_QH))

/* for periodic/async schedules and qtd lists, mark end of list */
#define FOTG210_LIST_END(fotg210) \
	cpu_to_hc32(fotg210, 1) /* "null pointer" to hw */
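/*
 * Illustrative sketch (not part of the original header, and simplified):
 * an empty periodic schedule is a table of "end of list" markers, and
 * linking a QH in front of a periodic slot uses QH_NEXT() so the hardware
 * sees both the DMA address and the type tag, roughly:
 *
 *	for (i = 0; i < fotg210->periodic_size; i++)
 *		fotg210->periodic[i] = FOTG210_LIST_END(fotg210);
 *	...
 *	qh->hw->hw_next = fotg210->periodic[frame];
 *	fotg210->periodic[frame] = QH_NEXT(fotg210, qh->qh_dma);
 */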

/*
 * Entries in periodic shadow table are pointers to one of four kinds
 * of data structure.  That's dictated by the hardware; a type tag is
 * encoded in the low bits of the hardware's periodic schedule.  Use
 * Q_NEXT_TYPE to get the tag.
 *
 * For entries in the async schedule, the type tag always says "qh".
 */
union fotg210_shadow {
	struct fotg210_qh	*qh;		/* Q_TYPE_QH */
	struct fotg210_itd	*itd;		/* Q_TYPE_ITD */
	struct fotg210_fstn	*fstn;		/* Q_TYPE_FSTN */
	__hc32			*hw_next;	/* (all types) */
	void			*ptr;
};

/*-------------------------------------------------------------------------*/

/*
 * EHCI Specification 0.95 Section 3.6
 * QH: describes control/bulk/interrupt endpoints
 * See Fig 3-7 "Queue Head Structure Layout".
 *
 * These appear in both the async and (for interrupt) periodic schedules.
 */

/* first part defined by EHCI spec */
struct fotg210_qh_hw {
	__hc32			hw_next;	/* see EHCI 3.6.1 */
	__hc32			hw_info1;	/* see EHCI 3.6.2 */
#define	QH_CONTROL_EP	(1 << 27)	/* FS/LS control endpoint */
#define	QH_HEAD		(1 << 15)	/* Head of async reclamation list */
#define	QH_TOGGLE_CTL	(1 << 14)	/* Data toggle control */
#define	QH_HIGH_SPEED	(2 << 12)	/* Endpoint speed */
#define	QH_LOW_SPEED	(1 << 12)
#define	QH_FULL_SPEED	(0 << 12)
#define	QH_INACTIVATE	(1 << 7)	/* Inactivate on next transaction */
	__hc32			hw_info2;	/* see EHCI 3.6.2 */
#define	QH_SMASK	0x000000ff
#define	QH_CMASK	0x0000ff00
#define	QH_HUBADDR	0x007f0000
#define	QH_HUBPORT	0x3f800000
#define	QH_MULT		0xc0000000
	__hc32			hw_current;	/* qtd list - see EHCI 3.6.4 */

	/* qtd overlay (hardware parts of a struct fotg210_qtd) */
	__hc32			hw_qtd_next;
	__hc32			hw_alt_next;
	__hc32			hw_token;
	__hc32			hw_buf[5];
	__hc32			hw_buf_hi[5];
} __aligned(32);

struct fotg210_qh {
	struct fotg210_qh_hw	*hw;		/* Must come first */
	/* the rest is HCD-private */
	dma_addr_t		qh_dma;		/* address of qh */
	union fotg210_shadow	qh_next;	/* ptr to qh; or periodic */
	struct list_head	qtd_list;	/* sw qtd list */
	struct list_head	intr_node;	/* list of intr QHs */
	struct fotg210_qtd	*dummy;
	struct fotg210_qh	*unlink_next;	/* next on unlink list */

	unsigned		unlink_cycle;

	u8			needs_rescan;	/* Dequeue during giveback */
	u8			qh_state;
#define	QH_STATE_LINKED		1		/* HC sees this */
#define	QH_STATE_UNLINK		2		/* HC may still see this */
#define	QH_STATE_IDLE		3		/* HC doesn't see this */
#define	QH_STATE_UNLINK_WAIT	4		/* LINKED and on unlink q */
#define	QH_STATE_COMPLETING	5		/* don't touch token.HALT */

	u8			xacterrs;	/* XactErr retry counter */
#define	QH_XACTERR_MAX		32		/* XactErr retry limit */

	/* periodic schedule info */
	u8			usecs;		/* intr bandwidth */
	u8			gap_uf;		/* uframes split/csplit gap */
	u8			c_usecs;	/* ... split completion bw */
	u16			tt_usecs;	/* tt downstream bandwidth */
	unsigned short		period;		/* polling interval */
	unsigned short		start;		/* where polling starts */
#define NO_FRAME ((unsigned short)~0)			/* pick new start */

	struct usb_device	*dev;		/* access to TT */
	unsigned		is_out:1;	/* bulk or intr OUT */
	unsigned		clearing_tt:1;	/* Clear-TT-Buf in progress */
};

/*-------------------------------------------------------------------------*/

/* description of one iso transaction (up to 3 KB data if highspeed) */
struct fotg210_iso_packet {
	/* These will be copied to iTD when scheduling */
	u64			bufp;		/* itd->hw_bufp{,_hi}[pg] |= */
	__hc32			transaction;	/* itd->hw_transaction[i] |= */
	u8			cross;		/* buf crosses pages */
	/* for full speed OUT splits */
	u32			buf1;
};

/* temporary schedule data for packets from iso urbs (both speeds)
 * each packet is one logical usb transaction to the device (not TT),
 * beginning at stream->next_uframe
 */
struct fotg210_iso_sched {
	struct list_head	td_list;
	unsigned		span;
	struct fotg210_iso_packet	packet[];
};

/*
 * fotg210_iso_stream - groups all (s)itds for this endpoint.
 * acts like a qh would, if EHCI had them for ISO.
 */
struct fotg210_iso_stream {
	/* first field matches fotg210_qh, but is NULL */
	struct fotg210_qh_hw	*hw;

	u8			bEndpointAddress;
	u8			highspeed;
	struct list_head	td_list;	/* queued itds */
	struct list_head	free_list;	/* list of unused itds */
	struct usb_device	*udev;
	struct usb_host_endpoint *ep;

	/* output of (re)scheduling */
	int			next_uframe;
	__hc32			splits;

	/* the rest is derived from the endpoint descriptor,
	 * trusting urb->interval == f(epdesc->bInterval) and
	 * including the extra info for hw_bufp[0..2]
	 */
	u8			usecs, c_usecs;
	u16			interval;
	u16			tt_usecs;
	u16			maxp;
	u16			raw_mask;
	unsigned		bandwidth;

	/* This is used to initialize iTD's hw_bufp fields */
	__hc32			buf0;
	__hc32			buf1;
	__hc32			buf2;

	/* this is used to initialize sITD's tt info */
	__hc32			address;
};

/*-------------------------------------------------------------------------*/

/*
 * EHCI Specification 0.95 Section 3.3
 * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
 *
 * Schedule records for high speed iso xfers
 */
struct fotg210_itd {
	/* first part defined by EHCI spec */
	__hc32			hw_next;	/* see EHCI 3.3.1 */
	__hc32			hw_transaction[8]; /* see EHCI 3.3.2 */
#define FOTG210_ISOC_ACTIVE	(1<<31)	/* activate transfer this slot */
#define FOTG210_ISOC_BUF_ERR	(1<<30)	/* Data buffer error */
#define FOTG210_ISOC_BABBLE	(1<<29)	/* babble detected */
#define FOTG210_ISOC_XACTERR	(1<<28)	/* XactErr - transaction error */
#define	FOTG210_ITD_LENGTH(tok)	(((tok)>>16) & 0x0fff)
#define	FOTG210_ITD_IOC		(1 << 15)	/* interrupt on complete */

#define ITD_ACTIVE(fotg210)	cpu_to_hc32(fotg210, FOTG210_ISOC_ACTIVE)

	__hc32			hw_bufp[7];	/* see EHCI 3.3.3 */
	__hc32			hw_bufp_hi[7];	/* Appendix B */

	/* the rest is HCD-private */
	dma_addr_t		itd_dma;	/* for this itd */
	union fotg210_shadow	itd_next;	/* ptr to periodic q entry */

	struct urb		*urb;
	struct fotg210_iso_stream	*stream;	/* endpoint's queue */
	struct list_head	itd_list;	/* list of stream's itds */

	/* any/all hw_transactions here may be used by that urb */
	unsigned		frame;		/* where scheduled */
	unsigned		pg;
	unsigned		index[8];	/* in urb->iso_frame_desc */
} __aligned(32);

/*-------------------------------------------------------------------------*/

/*
 * EHCI Specification 0.96 Section 3.7
 * Periodic Frame Span Traversal Node (FSTN)
 *
 * Manages split interrupt transactions (using TT) that span frame boundaries
 * into uframes 0/1; see 4.12.2.2.  In those uframes, a "save place" FSTN
 * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
 * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
 */
struct fotg210_fstn {
	__hc32			hw_next;	/* any periodic q entry */
	__hc32			hw_prev;	/* qh or FOTG210_LIST_END */

	/* the rest is HCD-private */
	dma_addr_t		fstn_dma;
	union fotg210_shadow	fstn_next;	/* ptr to periodic q entry */
} __aligned(32);

/*-------------------------------------------------------------------------*/

/* Prepare the PORTSC wakeup flags during controller suspend/resume */

#define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \
		fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup)

#define fotg210_prepare_ports_for_controller_resume(fotg210)		\
		fotg210_adjust_port_wakeup_flags(fotg210, false, false)

/*-------------------------------------------------------------------------*/

/*
 * Some EHCI controllers have a Transaction Translator built into the
 * root hub. This is a non-standard feature.  Each controller will need
 * to add code to the following inline functions, and call them as
 * needed (mostly in root hub code).
 */

static inline unsigned int
fotg210_get_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
{
	return (readl(&fotg210->regs->otgcsr)
		& OTGCSR_HOST_SPD_TYP) >> 22;
}

/* Returns the speed of a device attached to a port on the root hub. */
static inline unsigned int
fotg210_port_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
{
	switch (fotg210_get_speed(fotg210, portsc)) {
	case 0:
		return 0;
	case 1:
		return USB_PORT_STAT_LOW_SPEED;
	case 2:
	default:
		return USB_PORT_STAT_HIGH_SPEED;
	}
}
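/*
 * Illustrative sketch (not part of the original header): the hub_control()
 * GetPortStatus path is expected to fold this into the wPortStatus bits it
 * reports, roughly:
 *
 *	if (temp & PORT_CONNECT) {
 *		status |= USB_PORT_STAT_CONNECTION;
 *		status |= fotg210_port_speed(fotg210, temp);
 *	}
 */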

/*-------------------------------------------------------------------------*/

#define	fotg210_has_fsl_portno_bug(e)		(0)

/*
 * While most USB host controllers implement their registers in
 * little-endian format, a minority (celleb companion chip) implement
 * them in big endian format.
 *
 * This attempts to support either format at compile time without a
 * runtime penalty, or both formats with the additional overhead
 * of checking a flag bit.
 *
 */

#define fotg210_big_endian_mmio(e)	0
#define fotg210_big_endian_capbase(e)	0

static inline unsigned int fotg210_readl(const struct fotg210_hcd *fotg210,
		__u32 __iomem *regs)
{
	return readl(regs);
}

static inline void fotg210_writel(const struct fotg210_hcd *fotg210,
		const unsigned int val, __u32 __iomem *regs)
{
	writel(val, regs);
}
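/*
 * Illustrative note (not part of the original header): these wrappers take
 * the otherwise-unused fotg210 argument so a big-endian MMIO variant could
 * be selected per controller without touching callers; register access in
 * the driver is expected to go through them, e.g.:
 *
 *	fotg210_writel(fotg210, fotg210->periodic_dma,
 *		       &fotg210->regs->frame_list);
 */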

/* cpu to fotg210 */
static inline __hc32 cpu_to_hc32(const struct fotg210_hcd *fotg210, const u32 x)
{
	return cpu_to_le32(x);
}

/* fotg210 to cpu */
static inline u32 hc32_to_cpu(const struct fotg210_hcd *fotg210, const __hc32 x)
{
	return le32_to_cpu(x);
}

static inline u32 hc32_to_cpup(const struct fotg210_hcd *fotg210,
			       const __hc32 *x)
{
	return le32_to_cpup(x);
}
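/*
 * Illustrative sketch (not part of the original header): hardware-visible
 * descriptor fields (__hc32) are always written through these helpers so
 * the same code would keep working if big-endian descriptors were enabled,
 * e.g. when handing a qtd to the controller (next_qtd_dma standing in for
 * the next descriptor's DMA address):
 *
 *	qtd->hw_token = cpu_to_hc32(fotg210, token);
 *	qtd->hw_next = QTD_NEXT(fotg210, next_qtd_dma);
 */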

/*-------------------------------------------------------------------------*/

static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
{
	return fotg210_readl(fotg210, &fotg210->regs->frame_index);
}
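/*
 * Illustrative note (not part of the original header): FRINDEX counts
 * microframes, so the current frame number is the value shifted right by
 * three and wrapped to the periodic table size, roughly:
 *
 *	fotg210->now_frame = (fotg210_read_frame_index(fotg210) >> 3) %
 *			fotg210->periodic_size;
 */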

/*-------------------------------------------------------------------------*/

#endif /* __LINUX_FOTG210_H */