/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * hcd.h - DesignWare HS OTG Controller host-mode declarations
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __DWC2_HCD_H__
#define __DWC2_HCD_H__

/*
 * This file contains the structures, constants, and interfaces for the
 * Host Controller Driver (HCD)
 *
 * The Host Controller Driver (HCD) is responsible for translating requests
 * from the USB Driver into the appropriate actions on the DWC_otg controller.
 * It isolates the USBD from the specifics of the controller by providing an
 * API to the USBD.
 */

struct dwc2_qh;

/**
 * struct dwc2_host_chan - Software host channel descriptor
 *
 * @hc_num:             Host channel number, used for register address lookup
 * @dev_addr:           Address of the device
 * @ep_num:             Endpoint of the device
 * @ep_is_in:           Endpoint direction
 * @speed:              Device speed. One of the following values:
 *                       - USB_SPEED_LOW
 *                       - USB_SPEED_FULL
 *                       - USB_SPEED_HIGH
 * @ep_type:            Endpoint type. One of the following values:
 *                       - USB_ENDPOINT_XFER_CONTROL: 0
 *                       - USB_ENDPOINT_XFER_ISOC:    1
 *                       - USB_ENDPOINT_XFER_BULK:    2
 *                       - USB_ENDPOINT_XFER_INTR:    3
 * @max_packet:         Max packet size in bytes
 * @data_pid_start:     PID for initial transaction.
 *                       0: DATA0
 *                       1: DATA2
 *                       2: DATA1
 *                       3: MDATA (non-Control EP),
 *                          SETUP (Control EP)
 * @multi_count:        Number of additional periodic transactions per
 *                      (micro)frame
 * @xfer_buf:           Pointer to current transfer buffer position
 * @xfer_dma:           DMA address of xfer_buf
 * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
 *                      DWORD aligned
 * @xfer_len:           Total number of bytes to transfer
 * @xfer_count:         Number of bytes transferred so far
 * @start_pkt_count:    Packet count at start of transfer
 * @xfer_started:       True if the transfer has been started
 * @do_ping:            True if a PING request should be issued on this channel
 * @error_state:        True if the error count for this transaction is non-zero
 * @halt_on_queue:      True if this channel should be halted the next time a
 *                      request is queued for the channel. This is necessary in
 *                      slave mode if no request queue space is available when
 *                      an attempt is made to halt the channel.
 * @halt_pending:       True if the host channel has been halted, but the core
 *                      is not finished flushing queued requests
 * @do_split:           Enable split for the channel
 * @complete_split:     Enable complete split
 * @hub_addr:           Address of high speed hub for the split
 * @hub_port:           Port of the low/full speed device for the split
 * @xact_pos:           Split transaction position. One of the following values:
 *                       - DWC2_HCSPLT_XACTPOS_MID
 *                       - DWC2_HCSPLT_XACTPOS_BEGIN
 *                       - DWC2_HCSPLT_XACTPOS_END
 *                       - DWC2_HCSPLT_XACTPOS_ALL
 * @requests:           Number of requests issued for this channel since it was
 *                      assigned to the current transfer (not counting PINGs)
 * @schinfo:            Scheduling micro-frame bitmap
 * @ntd:                Number of transfer descriptors for the transfer
 * @halt_status:        Reason for halting the host channel
 * @hcint:              Contents of the HCINT register when the interrupt came
 * @qh:                 QH for the transfer being processed by this channel
 * @hc_list_entry:      For linking to list of host channels
 * @desc_list_addr:     Current QH's descriptor list DMA address
 * @desc_list_sz:       Current QH's descriptor list size
 * @split_order_list_entry: List entry for keeping track of the order of splits
 *
 * This structure represents the state of a single host channel when acting in
 * host mode. It contains the data items needed to transfer packets to an
 * endpoint via a host channel.
 */
struct dwc2_host_chan {
	u8 hc_num;

	unsigned dev_addr:7;
	unsigned ep_num:4;
	unsigned ep_is_in:1;
	unsigned speed:4;
	unsigned ep_type:2;
	unsigned max_packet:11;
	unsigned data_pid_start:2;
#define DWC2_HC_PID_DATA0	TSIZ_SC_MC_PID_DATA0
#define DWC2_HC_PID_DATA2	TSIZ_SC_MC_PID_DATA2
#define DWC2_HC_PID_DATA1	TSIZ_SC_MC_PID_DATA1
#define DWC2_HC_PID_MDATA	TSIZ_SC_MC_PID_MDATA
#define DWC2_HC_PID_SETUP	TSIZ_SC_MC_PID_SETUP

	unsigned multi_count:2;

	u8 *xfer_buf;
	dma_addr_t xfer_dma;
	dma_addr_t align_buf;
	u32 xfer_len;
	u32 xfer_count;
	u16 start_pkt_count;
	u8 xfer_started;
	u8 do_ping;
	u8 error_state;
	u8 halt_on_queue;
	u8 halt_pending;
	u8 do_split;
	u8 complete_split;
	u8 hub_addr;
	u8 hub_port;
	u8 xact_pos;
#define DWC2_HCSPLT_XACTPOS_MID	HCSPLT_XACTPOS_MID
#define DWC2_HCSPLT_XACTPOS_END	HCSPLT_XACTPOS_END
#define DWC2_HCSPLT_XACTPOS_BEGIN HCSPLT_XACTPOS_BEGIN
#define DWC2_HCSPLT_XACTPOS_ALL	HCSPLT_XACTPOS_ALL

	u8 requests;
	u8 schinfo;
	u16 ntd;
	enum dwc2_halt_status halt_status;
	u32 hcint;
	struct dwc2_qh *qh;
	struct list_head hc_list_entry;
	dma_addr_t desc_list_addr;
	u32 desc_list_sz;
	struct list_head split_order_list_entry;
};

struct dwc2_hcd_pipe_info {
	u8 dev_addr;
	u8 ep_num;
	u8 pipe_type;
	u8 pipe_dir;
	u16 maxp;
	u16 maxp_mult;
};

struct dwc2_hcd_iso_packet_desc {
	u32 offset;
	u32 length;
	u32 actual_length;
	u32 status;
};

struct dwc2_qtd;

struct dwc2_hcd_urb {
	void *priv;
	struct dwc2_qtd *qtd;
	void *buf;
	dma_addr_t dma;
	void *setup_packet;
	dma_addr_t setup_dma;
	u32 length;
	u32 actual_length;
	u32 status;
	u32 error_count;
	u32 packet_count;
	u32 flags;
	u16 interval;
	struct dwc2_hcd_pipe_info pipe_info;
	struct dwc2_hcd_iso_packet_desc iso_descs[];
};

/* Phases for control transfers */
enum dwc2_control_phase {
	DWC2_CONTROL_SETUP,
	DWC2_CONTROL_DATA,
	DWC2_CONTROL_STATUS,
};

/* Transaction types */
enum dwc2_transaction_type {
	DWC2_TRANSACTION_NONE,
	DWC2_TRANSACTION_PERIODIC,
	DWC2_TRANSACTION_NON_PERIODIC,
	DWC2_TRANSACTION_ALL,
};

/* The number of elements per LS bitmap (per port on multi_tt) */
#define DWC2_ELEMENTS_PER_LS_BITMAP	DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, \
						     BITS_PER_LONG)

/**
 * struct dwc2_tt - dwc2 data associated with a usb_tt
 *
 * @refcount:           Number of Queue Heads (QHs) holding a reference.
 * @usb_tt:             Pointer back to the official usb_tt.
 * @periodic_bitmaps:   Bitmap for which parts of the 1ms frame are accounted
 *                      for already.  Each is DWC2_ELEMENTS_PER_LS_BITMAP
 *                      elements (so sizeof(long) times that in bytes).
 *
 * This structure is stored in the hcpriv of the official usb_tt.
 */
struct dwc2_tt {
	int refcount;
	struct usb_tt *usb_tt;
	unsigned long periodic_bitmaps[];
};
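
/*
 * Sizing sketch (illustrative, not taken from this file): the flexible
 * periodic_bitmaps[] array is allocated with DWC2_ELEMENTS_PER_LS_BITMAP
 * longs per port.  If, hypothetically, DWC2_LS_SCHEDULE_SLICES were 36 and
 * BITS_PER_LONG were 64, DIV_ROUND_UP(36, 64) == 1, so a single-port
 * (non-multi-TT) hub would need an allocation of:
 *
 *	size = sizeof(struct dwc2_tt) +
 *	       DWC2_ELEMENTS_PER_LS_BITMAP * sizeof(unsigned long);
 */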

/**
 * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus.
 *
 * @start_schedule_us:  The start time on the main bus schedule.  Note that
 *                      the main bus schedule is tightly packed and this
 *                      time should be interpreted as tightly packed (so
 *                      uFrame 0 starts at 0 us, uFrame 1 starts at 100 us
 *                      instead of 125 us).
 * @duration_us:        How long this transfer goes.
 */
struct dwc2_hs_transfer_time {
	u32 start_schedule_us;
	u16 duration_us;
};
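
/*
 * Example (illustrative): in the tightly packed representation above, a
 * transfer placed at the start of uFrame 3 has start_schedule_us == 300
 * (3 * 100 us), even though uFrame 3 really begins 375 us (3 * 125 us)
 * into the frame on the bus.
 */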

/**
 * struct dwc2_qh - Software queue head structure
 *
 * @hsotg:              The HCD state structure for the DWC OTG controller
 * @ep_type:            Endpoint type. One of the following values:
 *                       - USB_ENDPOINT_XFER_CONTROL
 *                       - USB_ENDPOINT_XFER_BULK
 *                       - USB_ENDPOINT_XFER_INT
 *                       - USB_ENDPOINT_XFER_ISOC
 * @ep_is_in:           Endpoint direction
 * @maxp:               Value from wMaxPacketSize field of Endpoint Descriptor
 * @maxp_mult:          Multiplier for maxp
 * @dev_speed:          Device speed. One of the following values:
 *                       - USB_SPEED_LOW
 *                       - USB_SPEED_FULL
 *                       - USB_SPEED_HIGH
 * @data_toggle:        Determines the PID of the next data packet for
 *                      non-control transfers. Ignored for control transfers.
 *                      One of the following values:
 *                       - DWC2_HC_PID_DATA0
 *                       - DWC2_HC_PID_DATA1
 * @ping_state:         Ping state
 * @do_split:           Full/low speed endpoint on high-speed hub requires split
 * @td_first:           Index of first activated isochronous transfer descriptor
 * @td_last:            Index of last activated isochronous transfer descriptor
 * @host_us:            Bandwidth in microseconds per transfer as seen by host
 * @device_us:          Bandwidth in microseconds per transfer as seen by device
 * @host_interval:      Interval between transfers as seen by the host.  If
 *                      the host is high speed and the device is low speed this
 *                      will be 8 times device interval.
 * @device_interval:    Interval between transfers as seen by the device.
 * @next_active_frame:  (Micro)frame _before_ we next need to put something on
 *                      the bus.  We'll move the qh to active here.  If the
 *                      host is in high speed mode this will be a uframe.  If
 *                      the host is in low speed mode this will be a full frame.
 * @start_active_frame: If we are partway through a split transfer, this will be
 *                      what next_active_frame was when we started.  Otherwise
 *                      it should always be the same as next_active_frame.
 * @num_hs_transfers:   Number of transfers in hs_transfers.
 *                      Normally this is 1 but can be more than one for splits.
 *                      Always >= 1 unless the host is in low/full speed mode.
 * @hs_transfers:       Transfers that are scheduled as seen by the high speed
 *                      bus.  Not used if host is in low or full speed mode (but
 *                      note that it IS USED if the device is low or full speed
 *                      as long as the HOST is in high speed mode).
 * @ls_start_schedule_slice: Start time (in slices) on the low speed bus
 *                           schedule that's being used by this device.  This
 *                           will be on the periodic_bitmap in a
 *                           "struct dwc2_tt".  Not used if this device is high
 *                           speed.  Note that this is in "schedule slice" which
 *                           is tightly packed.
 * @ntd:                Actual number of transfer descriptors in a list
 * @dw_align_buf:       Used instead of original buffer if its physical address
 *                      is not dword-aligned
 * @dw_align_buf_dma:   DMA address for dw_align_buf
 * @qtd_list:           List of QTDs for this QH
 * @channel:            Host channel currently processing transfers for this QH
 * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
 *                      schedule
 * @desc_list:          List of transfer descriptors
 * @desc_list_dma:      Physical address of desc_list
 * @desc_list_sz:       Size of descriptors list
 * @n_bytes:            Xfer Bytes array. Each element corresponds to a transfer
 *                      descriptor and indicates original XferSize value for the
 *                      descriptor
 * @unreserve_timer:    Timer for releasing periodic reservation.
 * @wait_timer:         Timer used to wait before re-queuing.
 * @dwc_tt:             Pointer to our tt info (or NULL if no tt).
 * @ttport:             Port number within our tt.
 * @tt_buffer_dirty:    True if the EP's TT buffer is not clean, i.e. a
 *                      clear_tt_buffer_complete callback is still pending.
 * @unreserve_pending:  True if we planned to unreserve but haven't yet.
 * @schedule_low_speed: True if we have a low/full speed component (either the
 *                      host is in low/full speed mode or do_split).
 * @want_wait:          We should wait before re-queuing; only matters for non-
 *                      periodic transfers and is ignored for periodic ones.
 * @wait_timer_cancel:  Set to true to cancel the wait_timer.
 *
 * A Queue Head (QH) holds the static characteristics of an endpoint and
 * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
 * be entered in either the non-periodic or periodic schedule.
 */
struct dwc2_qh {
	struct dwc2_hsotg *hsotg;
	u8 ep_type;
	u8 ep_is_in;
	u16 maxp;
	u16 maxp_mult;
	u8 dev_speed;
	u8 data_toggle;
	u8 ping_state;
	u8 do_split;
	u8 td_first;
	u8 td_last;
	u16 host_us;
	u16 device_us;
	u16 host_interval;
	u16 device_interval;
	u16 next_active_frame;
	u16 start_active_frame;
	s16 num_hs_transfers;
	struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
	u32 ls_start_schedule_slice;
	u16 ntd;
	u8 *dw_align_buf;
	dma_addr_t dw_align_buf_dma;
	struct list_head qtd_list;
	struct dwc2_host_chan *channel;
	struct list_head qh_list_entry;
	struct dwc2_dma_desc *desc_list;
	dma_addr_t desc_list_dma;
	u32 desc_list_sz;
	u32 *n_bytes;
	struct timer_list unreserve_timer;
	struct hrtimer wait_timer;
	struct dwc2_tt *dwc_tt;
	int ttport;
	unsigned tt_buffer_dirty:1;
	unsigned unreserve_pending:1;
	unsigned schedule_low_speed:1;
	unsigned want_wait:1;
	unsigned wait_timer_cancel:1;
};
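
/*
 * Interval sketch (illustrative, values assumed rather than taken from this
 * file): for a full-speed interrupt endpoint with bInterval 1 sitting behind
 * a high-speed hub, the scheduler would typically end up with
 * device_interval == 1 (frames), host_interval == 8 (uframes, i.e. 8x the
 * device interval as described above) and do_split == 1.
 */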

/**
 * struct dwc2_qtd - Software queue transfer descriptor (QTD)
 *
 * @control_phase:      Current phase for control transfers (Setup, Data, or
 *                      Status)
 * @in_process:         Indicates if this QTD is currently processed by HW
 * @data_toggle:        Determines the PID of the next data packet for the
 *                      data phase of control transfers. Ignored for other
 *                      transfer types. One of the following values:
 *                       - DWC2_HC_PID_DATA0
 *                       - DWC2_HC_PID_DATA1
 * @complete_split:     Keeps track of the current split type for FS/LS
 *                      endpoints on a HS Hub
 * @isoc_split_pos:     Position of the ISOC split in full/low speed
 * @isoc_frame_index:   Index of the next frame descriptor for an isochronous
 *                      transfer. A frame descriptor describes the buffer
 *                      position and length of the data to be transferred in the
 *                      next scheduled (micro)frame of an isochronous transfer.
 *                      It also holds status for that transaction. The frame
 *                      index starts at 0.
 * @isoc_split_offset:  Position of the ISOC split in the buffer for the
 *                      current frame
 * @ssplit_out_xfer_count: How many bytes transferred during SSPLIT OUT
 * @error_count:        Holds the number of bus errors that have occurred for
 *                      a transaction within this transfer
 * @n_desc:             Number of DMA descriptors for this QTD
 * @isoc_frame_index_last: Last activated frame (packet) index, used in
 *                      descriptor DMA mode only
 * @num_naks:           Number of NAKs received on this QTD.
 * @urb:                URB for this transfer
 * @qh:                 Queue head for this QTD
 * @qtd_list_entry:     For linking to the QH's list of QTDs
 * @isoc_td_first:      Index of first activated isochronous transfer
 *                      descriptor in Descriptor DMA mode
 * @isoc_td_last:       Index of last activated isochronous transfer
 *                      descriptor in Descriptor DMA mode
 *
 * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
 * interrupt, or isochronous transfer. A single QTD is created for each URB
 * (of one of these types) submitted to the HCD. The transfer associated with
 * a QTD may require one or multiple transactions.
 *
 * A QTD is linked to a Queue Head, which is entered in either the
 * non-periodic or periodic schedule for execution. When a QTD is chosen for
 * execution, some or all of its transactions may be executed. After
 * execution, the state of the QTD is updated. The QTD may be retired if all
 * its transactions are complete or if an error occurred. Otherwise, it
 * remains in the schedule so more transactions can be executed later.
 */
struct dwc2_qtd {
	enum dwc2_control_phase control_phase;
	u8 in_process;
	u8 data_toggle;
	u8 complete_split;
	u8 isoc_split_pos;
	u16 isoc_frame_index;
	u16 isoc_split_offset;
	u16 isoc_td_last;
	u16 isoc_td_first;
	u32 ssplit_out_xfer_count;
	u8 error_count;
	u8 n_desc;
	u16 isoc_frame_index_last;
	u16 num_naks;
	struct dwc2_hcd_urb *urb;
	struct dwc2_qh *qh;
	struct list_head qtd_list_entry;
};

#ifdef DEBUG
struct hc_xfer_info {
	struct dwc2_hsotg *hsotg;
	struct dwc2_host_chan *chan;
};
#endif

u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);

/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
{
	return (struct usb_hcd *)hsotg->priv;
}

/*
 * Inline used to disable one channel interrupt. Channel interrupts are
 * disabled when the channel is halted or released by the interrupt handler.
 * There is no need to handle further interrupts of that type until the
 * channel is re-assigned. In fact, subsequent handling may cause crashes
 * because the channel structures are cleaned up when the channel is released.
 */
static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
{
	u32 mask = dwc2_readl(hsotg, HCINTMSK(chnum));

	mask &= ~intr;
	dwc2_writel(hsotg, mask, HCINTMSK(chnum));
}
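
/*
 * Usage sketch (illustrative): once a Channel Halted interrupt has been
 * fully serviced for a channel that is being released, that interrupt can
 * be masked off until the channel is re-assigned:
 *
 *	disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
 */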

void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status);
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan);

/*
 * Reads HPRT0 in preparation for modifying it.  It clears the write-clear
 * (WC) bits so that, if they were read as 1, they are not cleared again
 * when the value is written back.
 */
static inline u32 dwc2_read_hprt0(struct dwc2_hsotg *hsotg)
{
	u32 hprt0 = dwc2_readl(hsotg, HPRT0);

	hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | HPRT0_OVRCURRCHG);
	return hprt0;
}
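
/*
 * Read-modify-write sketch (illustrative): because the write-1-to-clear
 * bits are already masked to 0, a caller can set a control bit and write
 * the value back without accidentally clearing pending change flags:
 *
 *	hprt0 = dwc2_read_hprt0(hsotg);
 *	hprt0 |= HPRT0_RST;
 *	dwc2_writel(hsotg, hprt0, HPRT0);
 */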

static inline u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->ep_num;
}

static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type;
}

static inline u16 dwc2_hcd_get_maxp(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->maxp;
}

static inline u16 dwc2_hcd_get_maxp_mult(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->maxp_mult;
}

static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->dev_addr;
}

static inline u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type == USB_ENDPOINT_XFER_ISOC;
}

static inline u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type == USB_ENDPOINT_XFER_INT;
}

static inline u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type == USB_ENDPOINT_XFER_BULK;
}

static inline u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
{
	return pipe->pipe_dir == USB_DIR_IN;
}

static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
{
	return !dwc2_hcd_is_pipe_in(pipe);
}

int dwc2_hcd_init(struct dwc2_hsotg *hsotg);
void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);

/* Transaction Execution Functions */
enum dwc2_transaction_type dwc2_hcd_select_transactions(
						struct dwc2_hsotg *hsotg);
void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
				 enum dwc2_transaction_type tr_type);

/* Schedule Queue Functions */
/* Implemented in hcd_queue.c */
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
				   struct dwc2_hcd_urb *urb,
				   gfp_t mem_flags);
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_csplit);

void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh *qh);

/* Unlinks and frees a QTD */
static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
						struct dwc2_qtd *qtd,
						struct dwc2_qh *qh)
{
	list_del(&qtd->qtd_list_entry);
	kfree(qtd);
}

/* Descriptor DMA support functions */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg,
			      struct dwc2_qh *qh);
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status);

int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags);
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);

/* Check if QH is non-periodic */
#define dwc2_qh_is_non_per(_qh_ptr_) \
	((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK || \
	 (_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL)

#ifdef CONFIG_USB_DWC2_DEBUG_PERIODIC
static inline bool dbg_hc(struct dwc2_host_chan *hc) { return true; }
static inline bool dbg_qh(struct dwc2_qh *qh) { return true; }
static inline bool dbg_urb(struct urb *urb) { return true; }
static inline bool dbg_perio(void) { return true; }
#else /* !CONFIG_USB_DWC2_DEBUG_PERIODIC */
static inline bool dbg_hc(struct dwc2_host_chan *hc)
{
	return hc->ep_type == USB_ENDPOINT_XFER_BULK ||
	       hc->ep_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline bool dbg_qh(struct dwc2_qh *qh)
{
	return qh->ep_type == USB_ENDPOINT_XFER_BULK ||
	       qh->ep_type == USB_ENDPOINT_XFER_CONTROL;
}

static inline bool dbg_urb(struct urb *urb)
{
	return usb_pipetype(urb->pipe) == PIPE_BULK ||
	       usb_pipetype(urb->pipe) == PIPE_CONTROL;
}

static inline bool dbg_perio(void) { return false; }
#endif

/*
 * Returns true if frame1 index is greater than frame2 index. The comparison
 * is done modulo FRLISTEN_64_SIZE. This accounts for the rollover of the
 * frame number when the max index frame number is reached.
 */
static inline bool dwc2_frame_idx_num_gt(u16 fr_idx1, u16 fr_idx2)
{
	u16 diff = fr_idx1 - fr_idx2;
	u16 sign = diff & (FRLISTEN_64_SIZE >> 1);

	return diff && !sign;
}
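
/*
 * Worked example (illustrative, assuming FRLISTEN_64_SIZE == 64):
 * dwc2_frame_idx_num_gt(1, 63) is true, because index 1 is only two
 * entries ahead of index 63 once the frame list wraps, while
 * dwc2_frame_idx_num_gt(63, 1) is false.
 */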

/*
 * Returns true if frame1 is less than or equal to frame2. The comparison is
 * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
 * frame number when the max frame number is reached.
 */
static inline int dwc2_frame_num_le(u16 frame1, u16 frame2)
{
	return ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= (HFNUM_MAX_FRNUM >> 1);
}

/*
 * Returns true if frame1 is greater than frame2. The comparison is done
 * modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
 * number when the max frame number is reached.
 */
static inline int dwc2_frame_num_gt(u16 frame1, u16 frame2)
{
	return (frame1 != frame2) &&
	       ((frame1 - frame2) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
}

/*
 * Increments frame by the amount specified by inc. The addition is done
 * modulo HFNUM_MAX_FRNUM. Returns the incremented value.
 */
static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
{
	return (frame + inc) & HFNUM_MAX_FRNUM;
}

static inline u16 dwc2_frame_num_dec(u16 frame, u16 dec)
{
	return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM;
}

static inline u16 dwc2_full_frame_num(u16 frame)
{
	return (frame & HFNUM_MAX_FRNUM) >> 3;
}

static inline u16 dwc2_micro_frame_num(u16 frame)
{
	return frame & 0x7;
}
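
/*
 * Worked examples (illustrative, assuming HFNUM_MAX_FRNUM == 0x3fff):
 * dwc2_frame_num_gt(0x0001, 0x3fff) is true because the frame counter has
 * wrapped and 0x0001 is two frames ahead of 0x3fff, and correspondingly
 * dwc2_frame_num_inc(0x3fff, 2) == 0x0001.  For frame number 0x0013,
 * dwc2_full_frame_num() returns 2 and dwc2_micro_frame_num() returns 3,
 * i.e. microframe 3 of full frame 2.
 */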

/*
 * Returns the Core Interrupt Status register contents, ANDed with the Core
 * Interrupt Mask register contents
 */
static inline u32 dwc2_read_core_intr(struct dwc2_hsotg *hsotg)
{
	return dwc2_readl(hsotg, GINTSTS) &
	       dwc2_readl(hsotg, GINTMSK);
}

static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
{
	return dwc2_urb->status;
}

static inline u32 dwc2_hcd_urb_get_actual_length(
		struct dwc2_hcd_urb *dwc2_urb)
{
	return dwc2_urb->actual_length;
}

static inline u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
{
	return dwc2_urb->error_count;
}

static inline void dwc2_hcd_urb_set_iso_desc_params(
		struct dwc2_hcd_urb *dwc2_urb, int desc_num, u32 offset,
		u32 length)
{
	dwc2_urb->iso_descs[desc_num].offset = offset;
	dwc2_urb->iso_descs[desc_num].length = length;
}

static inline u32 dwc2_hcd_urb_get_iso_desc_status(
		struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
	return dwc2_urb->iso_descs[desc_num].status;
}

static inline u32 dwc2_hcd_urb_get_iso_desc_actual_length(
		struct dwc2_hcd_urb *dwc2_urb, int desc_num)
{
	return dwc2_urb->iso_descs[desc_num].actual_length;
}

static inline int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hsotg *hsotg,
						  struct usb_host_endpoint *ep)
{
	struct dwc2_qh *qh = ep->hcpriv;

	if (qh && !list_empty(&qh->qh_list_entry))
		return 1;

	return 0;
}

static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
					    struct usb_host_endpoint *ep)
{
	struct dwc2_qh *qh = ep->hcpriv;

	if (!qh) {
		WARN_ON(1);
		return 0;
	}

	return qh->host_us;
}

void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan, int chnum,
			       struct dwc2_qtd *qtd);

/* HCD Core API */

/**
 * dwc2_handle_hcd_intr() - Called on every hardware interrupt
 *
 * @hsotg: The DWC2 HCD
 *
 * Returns IRQ_HANDLED if the interrupt was handled
 * Returns IRQ_NONE if the interrupt was not handled
 */
irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_stop() - Halts the DWC_otg host mode operation
 *
 * @hsotg: The DWC2 HCD
 */
void dwc2_hcd_stop(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_is_b_host() - Returns 1 if core currently is acting as B host,
 * and 0 otherwise
 *
 * @hsotg: The DWC2 HCD
 */
int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg);

/**
 * dwc2_hcd_dump_state() - Dumps hsotg state
 *
 * @hsotg: The DWC2 HCD
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg);

/* URB interface */

/* Transfer flags */
#define URB_GIVEBACK_ASAP	0x1
#define URB_SEND_ZERO_PACKET	0x2

/* Host driver callbacks */
struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
				      void *context, gfp_t mem_flags,
				      int *ttport);

void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg,
			   struct dwc2_tt *dwc_tt);
int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
			int status);

#endif /* __DWC2_HCD_H__ */