// SPDX-License-Identifier: GPL-2.0+
/*
 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
 *
 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
 * Copyright (C) 2012 Broadcom Corporation
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_iudma.h>
#include <bcm63xx_dev_usb_usbd.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#define DRV_MODULE_NAME		"bcm63xx_udc"

static const char bcm63xx_ep0name[] = "ep0";

static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} bcm63xx_ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(bcm63xx_ep0name,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
	EP_INFO("ep1in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4out-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");

/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet.  Slow but reliable.  The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 *   1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 *   2) Host sends 512 bytes of data
 *   3) Host decides to reconfigure the device and sends SET_INTERFACE
 *   4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B.  Generates
 * considerably fewer IRQs, but error recovery is less robust.  Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO.  The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
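
/*
 * Usage note (illustrative, not from this file): both parameters are
 * declared with S_IRUGO only, so they are read-only at runtime and must
 * be set at load time, e.g. "insmod bcm63xx_udc.ko use_fullspeed=1
 * irq_coalesce=1", or persistently with an
 * "options bcm63xx_udc irq_coalesce=1" line under /etc/modprobe.d/.
 */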

#define BCM63XX_NUM_EP		5
#define BCM63XX_NUM_IUDMA	6
#define BCM63XX_NUM_FIFO_PAIRS	3

#define IUDMA_RESET_TIMEOUT_US	10000

#define IUDMA_EP0_RXCHAN	0
#define IUDMA_EP0_TXCHAN	1

#define IUDMA_MAX_FRAGMENT	2048
#define BCM63XX_MAX_CTRL_PKT	64

#define BCMEP_CTRL		0x00
#define BCMEP_ISOC		0x01
#define BCMEP_BULK		0x02
#define BCMEP_INTR		0x03

#define BCMEP_OUT		0x00
#define BCMEP_IN		0x01

#define BCM63XX_SPD_FULL	1
#define BCM63XX_SPD_HIGH	0

#define IUDMA_DMAC_OFFSET	0x200
#define IUDMA_DMAS_OFFSET	0x400

enum bcm63xx_ep0_state {
	EP0_REQUEUE,
	EP0_IDLE,
	EP0_IN_DATA_PHASE_SETUP,
	EP0_IN_DATA_PHASE_COMPLETE,
	EP0_OUT_DATA_PHASE_SETUP,
	EP0_OUT_DATA_PHASE_COMPLETE,
	EP0_OUT_STATUS_PHASE,
	EP0_IN_FAKE_STATUS_PHASE,
	EP0_SHUTDOWN,
};

static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
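
/*
 * One plausible reading of these state names (the authoritative
 * transitions live in the ep0 worker later in this file): a control-IN
 * transfer would progress REQUEUE/IDLE -> IN_DATA_PHASE_SETUP ->
 * IN_DATA_PHASE_COMPLETE -> OUT_STATUS_PHASE, while the
 * SET_CONFIGURATION/SET_INTERFACE packets spoofed by this driver pass
 * through IN_FAKE_STATUS_PHASE, since no real status reply is sent for
 * them.
 */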

/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int		ep_num;
	int		n_bds;
	int		ep_type;
	int		dir;
	int		n_fifo_slots;
	int		max_pkt_hs;
	int		max_pkt_fs;
};

static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability.  Change
	   these values at your own risk.

	      ep_num         ep_type      n_fifo_slots      max_pkt_fs
	 idx    |  n_bds         |   dir        |  max_pkt_hs   |
	  |     |    |           |    |         |      |        |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,    64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,    64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,    64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,    64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,    64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,    64 },
};
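
/*
 * The table above is laid out in RX/TX pairs: even channel indices are
 * RX (OUT) and odd indices are TX (IN), which iudma_init_channel()
 * derives as "ch_idx & 0x01".  Pair 0 (channels 0/1) serves ep0, pair 1
 * (channels 2/3) the bulk endpoints, and pair 2 (channels 4/5) the
 * interrupt endpoints.
 */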

struct bcm63xx_udc;

/**
 * struct iudma_ch - Represents the current state of a single IUDMA channel.
 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
 * @ep_num: USB endpoint number.  -1 for ep0 RX.
 * @enabled: Whether bcm63xx_ep_enable() has been called.
 * @max_pkt: "Chunk size" on the USB interface.  Based on interface speed.
 * @is_tx: true for TX, false for RX.
 * @bep: Pointer to the associated endpoint.  NULL for ep0 RX.
 * @udc: Reference to the device controller.
 * @read_bd: Next buffer descriptor to reap from the hardware.
 * @write_bd: Next BD available for a new packet.
 * @end_bd: Points to the final BD in the ring.
 * @n_bds_used: Number of BD entries currently occupied.
 * @bd_ring: Base pointer to the BD ring.
 * @bd_ring_dma: Physical (DMA) address of bd_ring.
 * @n_bds: Total number of BDs in the ring.
 *
 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
 * bidirectional.  The "struct usb_ep" associated with ep0 is for TX (IN)
 * only.
 *
 * Each bulk/intr endpoint has a single IUDMA channel and a single
 * struct usb_ep.
 */
struct iudma_ch {
	unsigned int		ch_idx;
	int			ep_num;
	bool			enabled;
	int			max_pkt;
	bool			is_tx;
	struct bcm63xx_ep	*bep;
	struct bcm63xx_udc	*udc;

	struct bcm_enet_desc	*read_bd;
	struct bcm_enet_desc	*write_bd;
	struct bcm_enet_desc	*end_bd;
	int			n_bds_used;

	struct bcm_enet_desc	*bd_ring;
	dma_addr_t		bd_ring_dma;
	unsigned int		n_bds;
};

/**
 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
 * @ep_num: USB endpoint number.
 * @iudma: Pointer to IUDMA channel state.
 * @ep: USB gadget layer representation of the EP.
 * @udc: Reference to the device controller.
 * @queue: Linked list of outstanding requests for this EP.
 * @halted: 1 if the EP is stalled; 0 otherwise.
 */
struct bcm63xx_ep {
	unsigned int		ep_num;
	struct iudma_ch		*iudma;
	struct usb_ep		ep;
	struct bcm63xx_udc	*udc;
	struct list_head	queue;
	unsigned		halted:1;
};

/**
 * struct bcm63xx_req - Internal (driver) state of a single request.
 * @queue: Links back to the EP's request list.
 * @req: USB gadget layer representation of the request.
 * @offset: Current byte offset into the data buffer (next byte to queue).
 * @bd_bytes: Number of data bytes in outstanding BD entries.
 * @iudma: IUDMA channel used for the request.
 */
struct bcm63xx_req {
	struct list_head	queue;		/* ep's requests */
	struct usb_request	req;
	unsigned int		offset;
	unsigned int		bd_bytes;
	struct iudma_ch		*iudma;
};

/**
 * struct bcm63xx_udc - Driver/hardware private context.
 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
 * @dev: Generic Linux device structure.
 * @pd: Platform data (board/port info).
 * @usbd_clk: Clock descriptor for the USB device block.
 * @usbh_clk: Clock descriptor for the USB host block.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 * @usbd_regs: Base address of the USBD/USB20D block.
 * @iudma_regs: Base address of the USBD's associated IUDMA block.
 * @bep: Array of endpoints, including ep0.
 * @iudma: Array of all IUDMA channels used by this controller.
 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
 * @iface: USB interface number, from SET_INTERFACE wIndex.
 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
 * @ep0state: Current state of the ep0 state machine.
 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
 * @wedgemap: Bitmap of wedged endpoints.
 * @ep0_req_reset: USB reset is pending.
 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
 * @ep0_reply: Pending reply from gadget driver.
 * @ep0_request: Outstanding ep0 request.
 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
 */
struct bcm63xx_udc {
	spinlock_t			lock;

	struct device			*dev;
	struct bcm63xx_usbd_platform_data *pd;
	struct clk			*usbd_clk;
	struct clk			*usbh_clk;

	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;

	void __iomem			*usbd_regs;
	void __iomem			*iudma_regs;

	struct bcm63xx_ep		bep[BCM63XX_NUM_EP];
	struct iudma_ch			iudma[BCM63XX_NUM_IUDMA];

	int				cfg;
	int				iface;
	int				alt_iface;

	struct bcm63xx_req		ep0_ctrl_req;
	u8				*ep0_ctrl_buf;

	int				ep0state;
	struct work_struct		ep0_wq;

	unsigned long			wedgemap;

	unsigned			ep0_req_reset:1;
	unsigned			ep0_req_set_cfg:1;
	unsigned			ep0_req_set_iface:1;
	unsigned			ep0_req_shutdown:1;

	unsigned			ep0_req_completed:1;
	struct usb_request		*ep0_reply;
	struct usb_request		*ep0_request;

	struct dentry			*debugfs_root;
};

static const struct usb_ep_ops bcm63xx_udc_ep_ops;

/***********************************************************************
 * Convenience functions
 ***********************************************************************/

static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
{
	return container_of(g, struct bcm63xx_udc, gadget);
}

static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct bcm63xx_ep, ep);
}

static inline struct bcm63xx_req *our_req(struct usb_request *req)
{
	return container_of(req, struct bcm63xx_req, req);
}

static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
					int chan)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
			(ENETDMA_CHAN_WIDTH * chan));
}

static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
{
	if (is_enabled) {
		clk_enable(udc->usbh_clk);
		clk_enable(udc->usbd_clk);
		udelay(10);
	} else {
		clk_disable(udc->usbd_clk);
		clk_disable(udc->usbh_clk);
	}
}

/***********************************************************************
 * Low-level IUDMA / FIFO operations
 ***********************************************************************/

/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}

/**
 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
 * @udc: Reference to the device controller.
 * @bep: Endpoint on which to operate.
 * @is_stalled: true to enable stall, false to disable.
 *
 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
 * halt/stall conditions.
 */
static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
	bool is_stalled)
{
	u32 val;

	val = USBD_STALL_UPDATE_MASK |
		(is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
		(bep->ep_num << USBD_STALL_EPNUM_SHIFT);
	usbd_writel(udc, val, USBD_STALL_REG);
}

/**
 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
 * @udc: Reference to the device controller.
 *
 * These parameters depend on the USB link speed.  Settings are
 * per-IUDMA-channel-pair.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
			 USBD_RXFIFO_CONFIG_END_SHIFT);
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
			    USBD_RXFIFO_EPSIZE_REG);

		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
			 USBD_TXFIFO_CONFIG_END_SHIFT);
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		usbd_writel(udc,
			    is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
			    USBD_TXFIFO_EPSIZE_REG);

		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
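
/*
 * Worked example of the loop above using iudma_defaults[]: pair 0 (ep0)
 * claims RX FIFO slots 0-31, pair 1 (bulk) slots 32-159, and pair 2
 * (interrupt) slots 160-191; the TX side is carved up identically.
 * This assumes, as the table itself does, that the hardware provides at
 * least 192 slots per direction.
 */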

/**
 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
 * @udc: Reference to the device controller.
 * @ep_num: Endpoint number.
 */
static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
{
	u32 val;

	bcm63xx_ep_dma_select(udc, ep_num);

	val = usbd_readl(udc, USBD_CONTROL_REG);
	val |= USBD_CONTROL_FIFO_RESET_MASK;
	usbd_writel(udc, val, USBD_CONTROL_REG);
	usbd_readl(udc, USBD_CONTROL_REG);
}

/**
 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
{
	int i;

	for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
		bcm63xx_fifo_reset_ep(udc, i);
}

/**
 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
{
	u32 i, val;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];

		if (cfg->ep_num < 0)
			continue;

		bcm63xx_ep_dma_select(udc, cfg->ep_num);
		val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
			((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
		usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
	}
}

/**
 * bcm63xx_ep_setup - Configure per-endpoint settings.
 * @udc: Reference to the device controller.
 *
 * This needs to be rerun if the speed/cfg/intf/altintf changes.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			      cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt);

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
		      (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
		      (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
		      (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
		      (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
		      (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
		      (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}

/**
 * iudma_write - Queue a single IUDMA transaction.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 * @breq: Request containing the transaction data.
 *
 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
 * So iudma_write() may be called several times to fulfill a single
 * usb_request.
 *
 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
				    DMADESC_USB_ZERO_MASK;

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
		    (n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		mb();
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
			ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
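
/*
 * Worked example (sizes chosen for illustration): a 3000-byte TX request
 * with req.zero clear becomes two BDs -- 2048 bytes (SOP) + 952 bytes
 * (EOP) -- since max_bd_bytes is IUDMA_MAX_FRAGMENT for TX.  A 1024-byte
 * RX request on a high-speed bulk ep with irq_coalesce=false instead
 * gets a single 512-byte BD per call (max_bd_bytes == iudma->max_pkt,
 * and !is_tx forces EOP on the first BD), so iudma_write() must be
 * called once per packet received.
 */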

/**
 * iudma_read - Check for IUDMA buffer completion.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to use.
 *
 * This checks to see if ALL of the outstanding BDs on the DMA channel
 * have been filled.  If so, it returns the actual transfer length;
 * otherwise it returns -EBUSY.
 */
static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int i, actual_len = 0;
	struct bcm_enet_desc *d = iudma->read_bd;

	if (!iudma->n_bds_used)
		return -EINVAL;

	for (i = 0; i < iudma->n_bds_used; i++) {
		u32 dmaflags;

		dmaflags = d->len_stat;

		if (dmaflags & DMADESC_OWNER_MASK)
			return -EBUSY;

		actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
			      DMADESC_LENGTH_SHIFT;
		if (d == iudma->end_bd)
			d = iudma->bd_ring;
		else
			d++;
	}

	iudma->read_bd = d;
	iudma->n_bds_used = 0;
	return actual_len;
}
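
/*
 * Continuing the RX example above: once the hardware clears
 * DMADESC_OWNER_MASK on every queued BD, iudma_read() sums their
 * DMADESC_LENGTH fields and resets the ring bookkeeping; until then each
 * poll returns -EBUSY.  The completion path can then call iudma_write()
 * again to queue the next chunk of the same usb_request.
 */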

/**
 * iudma_reset_channel - Stop DMA on a single channel.
 * @udc: Reference to the device controller.
 * @iudma: IUDMA channel to reset.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	int timeout = IUDMA_RESET_TIMEOUT_US;
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);

	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
	       ENETDMAC_CHANCFG_EN_MASK) {
		udelay(1);

		/* repeatedly flush the FIFO data until the BD completes */
		if (iudma->is_tx && iudma->ep_num >= 0)
			bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

		if (!timeout--) {
			dev_err(udc->dev, "can't reset IUDMA channel %d\n",
				ch_idx);
			break;
		}
		if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
			dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
				 ch_idx);
			usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
					ENETDMAC_CHANCFG_REG, ch_idx);
		}
	}
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IRMASK_REG, ch_idx);
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}

/**
 * iudma_init_channel - One-time IUDMA channel initialization.
 * @udc: Reference to the device controller.
 * @ch_idx: Channel to initialize.
 */
static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
{
	struct iudma_ch *iudma = &udc->iudma[ch_idx];
	const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
	unsigned int n_bds = cfg->n_bds;
	struct bcm63xx_ep *bep = NULL;

	iudma->ep_num = cfg->ep_num;
	iudma->ch_idx = ch_idx;
	iudma->is_tx = !!(ch_idx & 0x01);
	if (iudma->ep_num >= 0) {
		bep = &udc->bep[iudma->ep_num];
		bep->iudma = iudma;
		INIT_LIST_HEAD(&bep->queue);
	}

	iudma->bep = bep;
	iudma->udc = udc;

	/* ep0 is always active; others are controlled by the gadget driver */
	if (iudma->ep_num <= 0)
		iudma->enabled = true;

	iudma->n_bds = n_bds;
	iudma->bd_ring = dmam_alloc_coherent(udc->dev,
		n_bds * sizeof(struct bcm_enet_desc),
		&iudma->bd_ring_dma, GFP_KERNEL);
	if (!iudma->bd_ring)
		return -ENOMEM;
	iudma->end_bd = &iudma->bd_ring[n_bds - 1];

	return 0;
}

/**
 * iudma_init - One-time initialization of all IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Enable DMA, flush channels, and enable global IUDMA IRQs.
 */
static int iudma_init(struct bcm63xx_udc *udc)
{
	int i, rc;

	usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		rc = iudma_init_channel(udc, i);
		if (rc)
			return rc;
		iudma_reset_channel(udc, &udc->iudma[i]);
	}

	usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA) - 1, ENETDMA_GLB_IRQMASK_REG);
	return 0;
}

/**
 * iudma_uninit - Uninitialize IUDMA channels.
 * @udc: Reference to the device controller.
 *
 * Kill global IUDMA IRQs, flush channels, and kill DMA.
 */
static void iudma_uninit(struct bcm63xx_udc *udc)
{
	int i;

	usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
		iudma_reset_channel(udc, &udc->iudma[i]);

	usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
}

/***********************************************************************
 * Other low-level USBD operations
 ***********************************************************************/

/**
 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to disable.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	val = BIT(USBD_EVENT_IRQ_USB_RESET) |
	      BIT(USBD_EVENT_IRQ_SETUP) |
	      BIT(USBD_EVENT_IRQ_SETCFG) |
	      BIT(USBD_EVENT_IRQ_SETINTF) |
	      BIT(USBD_EVENT_IRQ_USB_LINK);
	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}

/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}

/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+.
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}

/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}

/**
 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
					 GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_info[i].name;
		bep->ep.caps = bcm63xx_ep_info[i].caps;
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT);
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	set_clocks(udc, true);

	val = USBD_CONTROL_AUTO_CSRS_MASK |
	      USBD_CONTROL_DONE_CSRS_MASK |
	      (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	val = USBD_STRAPS_APP_SELF_PWR_MASK |
	      USBD_STRAPS_APP_RAM_IF_MASK |
	      USBD_STRAPS_APP_CSRPRGSUP_MASK |
	      USBD_STRAPS_APP_8BITPHY_MASK |
	      USBD_STRAPS_APP_RMTWKUP_MASK;

	if (udc->gadget.max_speed == USB_SPEED_HIGH)
		val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
	else
		val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
	      USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	/* propagate iudma_init() failure instead of claiming success */
	return rc;
}

/***********************************************************************
 * Standard EP gadget operations
 ***********************************************************************/

/**
 * bcm63xx_ep_enable - Enable one endpoint.
 * @ep: Endpoint to enable.
 * @desc: Contains max packet, direction, etc.
 *
 * Most of the endpoint parameters are fixed in this controller, so there
 * isn't much for this function to do.
 */
static int bcm63xx_ep_enable(struct usb_ep *ep,
	const struct usb_endpoint_descriptor *desc)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	unsigned long flags;

	if (!ep || !desc || ep->name == bcm63xx_ep0name)
		return -EINVAL;

	if (!udc->driver)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc->lock, flags);
	if (iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}

	iudma->enabled = true;
	BUG_ON(!list_empty(&bep->queue));

	iudma_reset_channel(udc, iudma);

	bep->halted = 0;
	bcm63xx_set_stall(udc, bep, false);
	clear_bit(bep->ep_num, &udc->wedgemap);

	ep->desc = desc;
	ep->maxpacket = usb_endpoint_maxp(desc);

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
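
/*
 * Sketch of the gadget-driver side (hypothetical code, not part of this
 * file) that reaches bcm63xx_ep_enable() through the standard gadget
 * API; "bulk_in_desc" is an assumed endpoint descriptor:
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
 *	...
 *	ep->desc = &bulk_in_desc;
 *	if (usb_ep_enable(ep) < 0)
 *		goto fail;
 */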

/**
 * bcm63xx_ep_disable - Disable one endpoint.
 * @ep: Endpoint to disable.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct bcm63xx_req *breq, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		list_for_each_entry_safe(breq, n, &bep->queue, queue) {
			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			spin_unlock_irqrestore(&udc->lock, flags);
			usb_gadget_giveback_request(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}

/**
 * bcm63xx_udc_alloc_request - Allocate a new request.
 * @ep: Endpoint associated with the request.
 * @mem_flags: Flags to pass to kzalloc().
 */
static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
	gfp_t mem_flags)
{
	struct bcm63xx_req *breq;

	breq = kzalloc(sizeof(*breq), mem_flags);
	if (!breq)
		return NULL;
	return &breq->req;
}

/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	kfree(breq);
}
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun /**
1140*4882a593Smuzhiyun * bcm63xx_udc_queue - Queue up a new request.
1141*4882a593Smuzhiyun * @ep: Endpoint associated with the request.
1142*4882a593Smuzhiyun * @req: Request to add.
1143*4882a593Smuzhiyun * @mem_flags: Unused.
1144*4882a593Smuzhiyun *
1145*4882a593Smuzhiyun * If the queue is empty, start this request immediately. Otherwise, add
1146*4882a593Smuzhiyun * it to the list.
1147*4882a593Smuzhiyun *
1148*4882a593Smuzhiyun * ep0 replies are sent through this function from the gadget driver, but
1149*4882a593Smuzhiyun * they are treated differently because they need to be handled by the ep0
1150*4882a593Smuzhiyun * state machine. (Sometimes they are replies to control requests that
1151*4882a593Smuzhiyun * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1152*4882a593Smuzhiyun */
bcm63xx_udc_queue(struct usb_ep * ep,struct usb_request * req,gfp_t mem_flags)1153*4882a593Smuzhiyun static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1154*4882a593Smuzhiyun gfp_t mem_flags)
1155*4882a593Smuzhiyun {
1156*4882a593Smuzhiyun struct bcm63xx_ep *bep = our_ep(ep);
1157*4882a593Smuzhiyun struct bcm63xx_udc *udc = bep->udc;
1158*4882a593Smuzhiyun struct bcm63xx_req *breq = our_req(req);
1159*4882a593Smuzhiyun unsigned long flags;
1160*4882a593Smuzhiyun int rc = 0;
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun if (unlikely(!req || !req->complete || !req->buf || !ep))
1163*4882a593Smuzhiyun return -EINVAL;
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun req->actual = 0;
1166*4882a593Smuzhiyun req->status = 0;
1167*4882a593Smuzhiyun breq->offset = 0;
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun if (bep == &udc->bep[0]) {
1170*4882a593Smuzhiyun /* only one reply per request, please */
1171*4882a593Smuzhiyun if (udc->ep0_reply)
1172*4882a593Smuzhiyun return -EINVAL;
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun udc->ep0_reply = req;
1175*4882a593Smuzhiyun schedule_work(&udc->ep0_wq);
1176*4882a593Smuzhiyun return 0;
1177*4882a593Smuzhiyun }
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun spin_lock_irqsave(&udc->lock, flags);
1180*4882a593Smuzhiyun if (!bep->iudma->enabled) {
1181*4882a593Smuzhiyun rc = -ESHUTDOWN;
1182*4882a593Smuzhiyun goto out;
1183*4882a593Smuzhiyun }
1184*4882a593Smuzhiyun
1185*4882a593Smuzhiyun rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1186*4882a593Smuzhiyun if (rc == 0) {
1187*4882a593Smuzhiyun list_add_tail(&breq->queue, &bep->queue);
1188*4882a593Smuzhiyun if (list_is_singular(&bep->queue))
1189*4882a593Smuzhiyun iudma_write(udc, bep->iudma, breq);
1190*4882a593Smuzhiyun }
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun out:
1193*4882a593Smuzhiyun spin_unlock_irqrestore(&udc->lock, flags);
1194*4882a593Smuzhiyun return rc;
1195*4882a593Smuzhiyun }
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun /**
1198*4882a593Smuzhiyun * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1199*4882a593Smuzhiyun * @ep: Endpoint associated with the request.
1200*4882a593Smuzhiyun * @req: Request to remove.
1201*4882a593Smuzhiyun *
1202*4882a593Smuzhiyun * If the request is not at the head of the queue, this is easy - just nuke
1203*4882a593Smuzhiyun * it. If the request is at the head of the queue, we'll need to stop the
1204*4882a593Smuzhiyun * DMA transaction and then queue up the successor.
1205*4882a593Smuzhiyun */
bcm63xx_udc_dequeue(struct usb_ep * ep,struct usb_request * req)1206*4882a593Smuzhiyun static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1207*4882a593Smuzhiyun {
1208*4882a593Smuzhiyun struct bcm63xx_ep *bep = our_ep(ep);
1209*4882a593Smuzhiyun struct bcm63xx_udc *udc = bep->udc;
1210*4882a593Smuzhiyun struct bcm63xx_req *breq = our_req(req), *cur;
1211*4882a593Smuzhiyun unsigned long flags;
1212*4882a593Smuzhiyun int rc = 0;
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun spin_lock_irqsave(&udc->lock, flags);
1215*4882a593Smuzhiyun if (list_empty(&bep->queue)) {
1216*4882a593Smuzhiyun rc = -EINVAL;
1217*4882a593Smuzhiyun goto out;
1218*4882a593Smuzhiyun }
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1221*4882a593Smuzhiyun usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun if (breq == cur) {
1224*4882a593Smuzhiyun iudma_reset_channel(udc, bep->iudma);
1225*4882a593Smuzhiyun list_del(&breq->queue);
1226*4882a593Smuzhiyun
1227*4882a593Smuzhiyun if (!list_empty(&bep->queue)) {
1228*4882a593Smuzhiyun struct bcm63xx_req *next;
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun next = list_first_entry(&bep->queue,
1231*4882a593Smuzhiyun struct bcm63xx_req, queue);
1232*4882a593Smuzhiyun iudma_write(udc, bep->iudma, next);
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun } else {
1235*4882a593Smuzhiyun list_del(&breq->queue);
1236*4882a593Smuzhiyun }
1237*4882a593Smuzhiyun
1238*4882a593Smuzhiyun out:
1239*4882a593Smuzhiyun spin_unlock_irqrestore(&udc->lock, flags);
1240*4882a593Smuzhiyun
1241*4882a593Smuzhiyun req->status = -ESHUTDOWN;
1242*4882a593Smuzhiyun req->complete(ep, req);
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun return rc;
1245*4882a593Smuzhiyun }
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun /**
1248*4882a593Smuzhiyun * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1249*4882a593Smuzhiyun * @ep: Endpoint to halt.
1250*4882a593Smuzhiyun * @value: Zero to clear halt; nonzero to set halt.
1251*4882a593Smuzhiyun *
1252*4882a593Smuzhiyun * See comments in bcm63xx_update_wedge().
1253*4882a593Smuzhiyun */
bcm63xx_udc_set_halt(struct usb_ep * ep,int value)1254*4882a593Smuzhiyun static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1255*4882a593Smuzhiyun {
1256*4882a593Smuzhiyun struct bcm63xx_ep *bep = our_ep(ep);
1257*4882a593Smuzhiyun struct bcm63xx_udc *udc = bep->udc;
1258*4882a593Smuzhiyun unsigned long flags;
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun spin_lock_irqsave(&udc->lock, flags);
1261*4882a593Smuzhiyun bcm63xx_set_stall(udc, bep, !!value);
1262*4882a593Smuzhiyun bep->halted = value;
1263*4882a593Smuzhiyun spin_unlock_irqrestore(&udc->lock, flags);
1264*4882a593Smuzhiyun
1265*4882a593Smuzhiyun return 0;
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun /**
1269*4882a593Smuzhiyun * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1270*4882a593Smuzhiyun * @ep: Endpoint to wedge.
1271*4882a593Smuzhiyun *
1272*4882a593Smuzhiyun * See comments in bcm63xx_update_wedge().
1273*4882a593Smuzhiyun */
bcm63xx_udc_set_wedge(struct usb_ep * ep)1274*4882a593Smuzhiyun static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1275*4882a593Smuzhiyun {
1276*4882a593Smuzhiyun struct bcm63xx_ep *bep = our_ep(ep);
1277*4882a593Smuzhiyun struct bcm63xx_udc *udc = bep->udc;
1278*4882a593Smuzhiyun unsigned long flags;
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun spin_lock_irqsave(&udc->lock, flags);
1281*4882a593Smuzhiyun set_bit(bep->ep_num, &udc->wedgemap);
1282*4882a593Smuzhiyun bcm63xx_set_stall(udc, bep, true);
1283*4882a593Smuzhiyun spin_unlock_irqrestore(&udc->lock, flags);
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun return 0;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
1289*4882a593Smuzhiyun .enable = bcm63xx_ep_enable,
1290*4882a593Smuzhiyun .disable = bcm63xx_ep_disable,
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun .alloc_request = bcm63xx_udc_alloc_request,
1293*4882a593Smuzhiyun .free_request = bcm63xx_udc_free_request,
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun .queue = bcm63xx_udc_queue,
1296*4882a593Smuzhiyun .dequeue = bcm63xx_udc_dequeue,
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun .set_halt = bcm63xx_udc_set_halt,
1299*4882a593Smuzhiyun .set_wedge = bcm63xx_udc_set_wedge,
1300*4882a593Smuzhiyun };
1301*4882a593Smuzhiyun
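/*
 * For orientation: a gadget function driver reaches the ops above through
 * the generic usb_ep API, not by calling them directly. A minimal sketch
 * (my_complete, buf and len are placeholders, not part of this driver):
 *
 *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		// req->status and req->actual are valid here
 *	}
 *
 *	usb_ep_enable(ep);				// -> bcm63xx_ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);	// -> bcm63xx_udc_alloc_request()
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_KERNEL);		// -> bcm63xx_udc_queue()
 */
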
/***********************************************************************
 * EP0 handling
 ***********************************************************************/

/**
 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
 * @udc: Reference to the device controller.
 * @ctrl: 8-byte SETUP request.
 */
static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
	struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock_irq(&udc->lock);
	rc = udc->driver->setup(&udc->gadget, ctrl);
	spin_lock_irq(&udc->lock);
	return rc;
}

/**
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * Many standard requests are handled automatically in the hardware, but
 * we still need to pass them to the gadget driver so that it can
 * reconfigure the interfaces/endpoints if necessary.
 *
 * Unfortunately we are not able to send a STALL response if the host
 * requests an invalid configuration. If this happens, we'll have to be
 * content with printing a warning.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
	ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}
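
/*
 * For reference, with udc->cfg == 1 the synthesized request above is
 * equivalent to this 8-byte SETUP packet on the wire (illustrative):
 *
 *	00 09 01 00 00 00 00 00
 *	^^ ^^ ^^^^^ ^^^^^ ^^^^^
 *	|  |  wValue wIndex wLength   (16-bit fields are little-endian)
 *	|  bRequest = USB_REQ_SET_CONFIGURATION (0x09)
 *	bmRequestType = USB_DIR_OUT | USB_RECIP_DEVICE (0x00)
 */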

/**
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
	ctrl.bRequest = USB_REQ_SET_INTERFACE;
	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		dev_warn_ratelimited(udc->dev,
			"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}

/**
 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @req: USB gadget layer representation of the request.
 */
static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);
	struct iudma_ch *iudma = &udc->iudma[ch_idx];

	BUG_ON(udc->ep0_request);
	udc->ep0_request = req;

	req->actual = 0;
	breq->offset = 0;
	usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
	iudma_write(udc, iudma, breq);
}

/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}

/**
 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
 *   reset/shutdown.
 * @udc: Reference to the device controller.
 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
 */
static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{
	struct usb_request *req = udc->ep0_reply;

	udc->ep0_reply = NULL;
	usb_gadget_unmap_request(&udc->gadget, req, is_tx);
	if (udc->ep0_request == req) {
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
	}
	bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
}

/**
 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
 *   transfer len.
 * @udc: Reference to the device controller.
 */
static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
{
	struct usb_request *req = udc->ep0_request;

	udc->ep0_req_completed = 0;
	udc->ep0_request = NULL;

	return req->actual;
}

/**
 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
 * @udc: Reference to the device controller.
 * @ch_idx: IUDMA channel number.
 * @length: Number of bytes to TX/RX.
 *
 * Used for simple transfers performed by the ep0 worker. This will always
 * use ep0_ctrl_req / ep0_ctrl_buf.
 */
static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
	int length)
{
	struct usb_request *req = &udc->ep0_ctrl_req.req;

	req->buf = udc->ep0_ctrl_buf;
	req->length = length;
	req->complete = NULL;

	bcm63xx_ep0_map_write(udc, ch_idx, req);
}

/**
 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
 * @udc: Reference to the device controller.
 *
 * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
 * for the next packet. Anything else means the transaction requires multiple
 * stages of handling.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
	 * always deliver these, so if we happen to see one, just throw
	 * it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		dev_warn_ratelimited(udc->dev,
			"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}

/**
 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
 * @udc: Reference to the device controller.
 *
 * In state EP0_IDLE, the RX descriptor is either pending, or has been
 * filled with a SETUP packet from the host. This function handles new
 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
 * and reset/shutdown events.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
			udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
	} else if (udc->ep0_req_completed) {
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
		usb_gadget_unmap_request(&udc->gadget,
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}

/**
 * bcm63xx_ep0_one_round - Handle the current ep0 state.
 * @udc: Reference to the device controller.
 *
 * Returns 0 if work was done; -EAGAIN if nothing to do.
 */
static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{
	enum bcm63xx_ep0_state ep0state = udc->ep0state;
	bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;

	switch (udc->ep0state) {
	case EP0_REQUEUE:
		/* set up descriptor to receive SETUP packet */
		bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
					     BCM63XX_MAX_CTRL_PKT);
		ep0state = EP0_IDLE;
		break;
	case EP0_IDLE:
		return bcm63xx_ep0_do_idle(udc);
	case EP0_IN_DATA_PHASE_SETUP:
		/*
		 * Normal case: TX request is in ep0_reply (queued by the
		 * callback), or will be queued shortly. When it's here,
		 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
		 *
		 * Shutdown case: Stop waiting for the reply. Just
		 * REQUEUE->IDLE. The gadget driver is NOT expected to
		 * queue anything else now.
		 */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_IN_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_DATA_PHASE_COMPLETE: {
		/*
		 * Normal case: TX packet (ep0_reply) is in flight; wait for
		 * it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
		 * completion to the gadget driver, then REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);
			/*
			 * the "ack" sometimes gets eaten (see
			 * bcm63xx_ep0_do_idle)
			 */
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 1);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_DATA_PHASE_SETUP:
		/* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
		if (udc->ep0_reply) {
			bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
					      udc->ep0_reply);
			ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
		} else if (shutdown) {
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_OUT_DATA_PHASE_COMPLETE: {
		/* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
		if (udc->ep0_req_completed) {
			udc->ep0_reply = NULL;
			bcm63xx_ep0_read_complete(udc);

			/* send 0-byte ack to host */
			bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
			ep0state = EP0_OUT_STATUS_PHASE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			bcm63xx_ep0_nuke_reply(udc, 0);
			ep0state = EP0_REQUEUE;
		}
		break;
	}
	case EP0_OUT_STATUS_PHASE:
		/*
		 * Normal case: 0-byte OUT ack packet is in flight; wait
		 * for it to finish, then go back to REQUEUE->IDLE.
		 *
		 * Shutdown case: just cancel the transmission. Don't bother
		 * calling the completion, because it originated from this
		 * function anyway. Then go back to REQUEUE->IDLE.
		 */
		if (udc->ep0_req_completed) {
			bcm63xx_ep0_read_complete(udc);
			ep0state = EP0_REQUEUE;
		} else if (shutdown) {
			iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
			udc->ep0_request = NULL;
			ep0state = EP0_REQUEUE;
		}
		break;
	case EP0_IN_FAKE_STATUS_PHASE: {
		/*
		 * Normal case: we spoofed a SETUP packet and are now
		 * waiting for the gadget driver to send a 0-byte reply.
		 * This doesn't actually get sent to the HW because the
		 * HW has already sent its own reply. Once we get the
		 * response, return to IDLE.
		 *
		 * Shutdown case: return to IDLE immediately.
		 *
		 * Note that the ep0 RX descriptor has remained queued
		 * (and possibly unfilled) during this entire transaction.
		 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
		 * or SET_INTERFACE transactions.
		 */
		struct usb_request *r = udc->ep0_reply;

		if (!r) {
			if (shutdown)
				ep0state = EP0_IDLE;
			break;
		}

		bcm63xx_ep0_complete(udc, r, 0);
		udc->ep0_reply = NULL;
		ep0state = EP0_IDLE;
		break;
	}
	case EP0_SHUTDOWN:
		break;
	}

	if (udc->ep0state == ep0state)
		return -EAGAIN;

	udc->ep0state = ep0state;
	return 0;
}

/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do. Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
	spin_lock_irq(&udc->lock);
	while (bcm63xx_ep0_one_round(udc) == 0)
		;
	spin_unlock_irq(&udc->lock);
}
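
/*
 * Normal-path ep0 state transitions, summarized from
 * bcm63xx_ep0_one_round() above (reset/shutdown events collapse any state
 * back to REQUEUE or IDLE; SHUTDOWN -> REQUEUE is performed separately by
 * bcm63xx_udc_pullup()):
 *
 *	REQUEUE -> IDLE                     (RX descriptor armed for SETUP)
 *	IDLE    -> REQUEUE                  (SETUP with wLength == 0)
 *	IDLE    -> IN_DATA_PHASE_SETUP  -> IN_DATA_PHASE_COMPLETE -> REQUEUE
 *	IDLE    -> OUT_DATA_PHASE_SETUP -> OUT_DATA_PHASE_COMPLETE
 *	        -> OUT_STATUS_PHASE     -> REQUEUE
 *	IDLE    -> IN_FAKE_STATUS_PHASE -> IDLE  (spoofed SET_CONFIGURATION
 *	                                          or SET_INTERFACE)
 */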

/***********************************************************************
 * Standard UDC gadget operations
 ***********************************************************************/

/**
 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
 * @gadget: USB device.
 */
static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);

	return (usbd_readl(udc, USBD_STATUS_REG) &
		USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
}

/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB device.
 * @is_on: 0 to disable pullup, 1 to enable.
 *
 * See notes in bcm63xx_select_pullup().
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}

/**
 * bcm63xx_udc_start - Start the controller.
 * @gadget: USB device.
 * @driver: Driver for USB device.
 */
static int bcm63xx_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
	    !driver->setup)
		return -EINVAL;
	if (!udc)
		return -ENODEV;
	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	set_clocks(udc, true);
	bcm63xx_fifo_setup(udc);
	bcm63xx_ep_init(udc);
	bcm63xx_ep_setup(udc);
	bcm63xx_fifo_reset(udc);
	bcm63xx_select_phy_mode(udc, true);

	udc->driver = driver;
	driver->driver.bus = NULL;
	udc->gadget.dev.of_node = udc->dev->of_node;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

/**
 * bcm63xx_udc_stop - Shut down the controller.
 * @gadget: USB device.
 */
static int bcm63xx_udc_stop(struct usb_gadget *gadget)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	udc->driver = NULL;

	/*
	 * If we switch the PHY too abruptly after dropping D+, the host
	 * will often complain:
	 *
	 *     hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
	 */
	msleep(100);

	bcm63xx_select_phy_mode(udc, false);
	set_clocks(udc, false);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame	= bcm63xx_udc_get_frame,
	.pullup		= bcm63xx_udc_pullup,
	.udc_start	= bcm63xx_udc_start,
	.udc_stop	= bcm63xx_udc_stop,
};
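
/*
 * A rough sketch of how the gadget/UDC core reaches these ops (call paths
 * simplified; the exact core entry points vary by kernel version):
 *
 *	gadget driver bind	-> ->udc_start()  (bcm63xx_udc_start)
 *	usb_gadget_connect()	-> ->pullup(gadget, 1)
 *	usb_gadget_disconnect()	-> ->pullup(gadget, 0)
 *	gadget driver unbind	-> ->udc_stop()   (bcm63xx_udc_stop)
 */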

/***********************************************************************
 * IRQ handling
 ***********************************************************************/

/**
 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
 * @udc: Reference to the device controller.
 *
 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
 * The driver never sees the raw control packets coming in on the ep0
 * IUDMA channel, but at least we get an interrupt event to tell us that
 * new values are waiting in the USBD_STATUS register.
 */
static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);

	udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
	udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
	udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
			 USBD_STATUS_ALTINTF_SHIFT;
	bcm63xx_ep_setup(udc);
}

/**
 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
 * @udc: Reference to the device controller.
 *
 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
 * speed has changed, so that the caller can update the endpoint settings.
 */
static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
{
	u32 reg = usbd_readl(udc, USBD_STATUS_REG);
	enum usb_device_speed oldspeed = udc->gadget.speed;

	switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
	case BCM63XX_SPD_HIGH:
		udc->gadget.speed = USB_SPEED_HIGH;
		break;
	case BCM63XX_SPD_FULL:
		udc->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		/* this should never happen */
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(udc->dev,
			"received SETUP packet with invalid link speed\n");
		return 0;
	}

	if (udc->gadget.speed != oldspeed) {
		dev_info(udc->dev, "link up, %s-speed mode\n",
			 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
		return 1;
	} else {
		return 0;
	}
}

/**
 * bcm63xx_update_wedge - Iterate through wedged endpoints.
 * @udc: Reference to the device controller.
 * @new_status: true to "refresh" wedge status; false to clear it.
 *
 * On a SETUP interrupt, we need to manually "refresh" the wedge status
 * because the controller hardware is designed to automatically clear
 * stalls in response to a CLEAR_FEATURE request from the host.
 *
 * On a RESET interrupt, we do want to restore all wedged endpoints.
 */
static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{
	int i;

	for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
		bcm63xx_set_stall(udc, &udc->bep[i], new_status);
		if (!new_status)
			clear_bit(i, &udc->wedgemap);
	}
}
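
/*
 * Wedge lifecycle at a glance (summary of the above):
 *
 *	usb_ep_set_wedge()	-> wedgemap bit set, STALL asserted
 *	host CLEAR_FEATURE	-> hardware silently clears the STALL
 *	SETUP IRQ		-> bcm63xx_update_wedge(udc, true) re-stalls
 *	USB RESET IRQ		-> bcm63xx_update_wedge(udc, false) clears
 *				   both the STALL and the wedgemap bit
 */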
1962*4882a593Smuzhiyun
1963*4882a593Smuzhiyun /**
1964*4882a593Smuzhiyun * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1965*4882a593Smuzhiyun * @irq: IRQ number (unused).
1966*4882a593Smuzhiyun * @dev_id: Reference to the device controller.
1967*4882a593Smuzhiyun *
1968*4882a593Smuzhiyun * This is where we handle link (VBUS) down, USB reset, speed changes,
1969*4882a593Smuzhiyun * SET_CONFIGURATION, and SET_INTERFACE events.
1970*4882a593Smuzhiyun */
bcm63xx_udc_ctrl_isr(int irq,void * dev_id)1971*4882a593Smuzhiyun static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1972*4882a593Smuzhiyun {
1973*4882a593Smuzhiyun struct bcm63xx_udc *udc = dev_id;
1974*4882a593Smuzhiyun u32 stat;
1975*4882a593Smuzhiyun bool disconnected = false, bus_reset = false;
1976*4882a593Smuzhiyun
1977*4882a593Smuzhiyun stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1978*4882a593Smuzhiyun usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1981*4882a593Smuzhiyun
1982*4882a593Smuzhiyun spin_lock(&udc->lock);
1983*4882a593Smuzhiyun if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1984*4882a593Smuzhiyun /* VBUS toggled */
1985*4882a593Smuzhiyun
1986*4882a593Smuzhiyun if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1987*4882a593Smuzhiyun USBD_EVENTS_USB_LINK_MASK) &&
1988*4882a593Smuzhiyun udc->gadget.speed != USB_SPEED_UNKNOWN)
1989*4882a593Smuzhiyun dev_info(udc->dev, "link down\n");
1990*4882a593Smuzhiyun
1991*4882a593Smuzhiyun udc->gadget.speed = USB_SPEED_UNKNOWN;
1992*4882a593Smuzhiyun disconnected = true;
1993*4882a593Smuzhiyun }
1994*4882a593Smuzhiyun if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
1995*4882a593Smuzhiyun bcm63xx_fifo_setup(udc);
1996*4882a593Smuzhiyun bcm63xx_fifo_reset(udc);
1997*4882a593Smuzhiyun bcm63xx_ep_setup(udc);
1998*4882a593Smuzhiyun
1999*4882a593Smuzhiyun bcm63xx_update_wedge(udc, false);
2000*4882a593Smuzhiyun
2001*4882a593Smuzhiyun udc->ep0_req_reset = 1;
2002*4882a593Smuzhiyun schedule_work(&udc->ep0_wq);
2003*4882a593Smuzhiyun bus_reset = true;
2004*4882a593Smuzhiyun }
2005*4882a593Smuzhiyun if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
2006*4882a593Smuzhiyun if (bcm63xx_update_link_speed(udc)) {
2007*4882a593Smuzhiyun bcm63xx_fifo_setup(udc);
2008*4882a593Smuzhiyun bcm63xx_ep_setup(udc);
2009*4882a593Smuzhiyun }
2010*4882a593Smuzhiyun bcm63xx_update_wedge(udc, true);
2011*4882a593Smuzhiyun }
2012*4882a593Smuzhiyun if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2013*4882a593Smuzhiyun bcm63xx_update_cfg_iface(udc);
2014*4882a593Smuzhiyun udc->ep0_req_set_cfg = 1;
2015*4882a593Smuzhiyun schedule_work(&udc->ep0_wq);
2016*4882a593Smuzhiyun }
2017*4882a593Smuzhiyun if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2018*4882a593Smuzhiyun bcm63xx_update_cfg_iface(udc);
2019*4882a593Smuzhiyun udc->ep0_req_set_iface = 1;
2020*4882a593Smuzhiyun schedule_work(&udc->ep0_wq);
2021*4882a593Smuzhiyun }
2022*4882a593Smuzhiyun spin_unlock(&udc->lock);
2023*4882a593Smuzhiyun
2024*4882a593Smuzhiyun if (disconnected && udc->driver)
2025*4882a593Smuzhiyun udc->driver->disconnect(&udc->gadget);
2026*4882a593Smuzhiyun else if (bus_reset && udc->driver)
2027*4882a593Smuzhiyun usb_gadget_udc_reset(&udc->gadget, udc->driver);
2028*4882a593Smuzhiyun
2029*4882a593Smuzhiyun return IRQ_HANDLED;
2030*4882a593Smuzhiyun }
2031*4882a593Smuzhiyun
2032*4882a593Smuzhiyun /**
2033*4882a593Smuzhiyun * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2034*4882a593Smuzhiyun * @irq: IRQ number (unused).
2035*4882a593Smuzhiyun * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2036*4882a593Smuzhiyun *
2037*4882a593Smuzhiyun * For the two ep0 channels, we have special handling that triggers the
2038*4882a593Smuzhiyun * ep0 worker thread. For normal bulk/intr channels, either queue up
2039*4882a593Smuzhiyun * the next buffer descriptor for the transaction (incomplete transaction),
2040*4882a593Smuzhiyun * or invoke the completion callback (complete transactions).
2041*4882a593Smuzhiyun */
bcm63xx_udc_data_isr(int irq,void * dev_id)2042*4882a593Smuzhiyun static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2043*4882a593Smuzhiyun {
2044*4882a593Smuzhiyun struct iudma_ch *iudma = dev_id;
2045*4882a593Smuzhiyun struct bcm63xx_udc *udc = iudma->udc;
2046*4882a593Smuzhiyun struct bcm63xx_ep *bep;
2047*4882a593Smuzhiyun struct usb_request *req = NULL;
2048*4882a593Smuzhiyun struct bcm63xx_req *breq = NULL;
2049*4882a593Smuzhiyun int rc;
2050*4882a593Smuzhiyun bool is_done = false;
2051*4882a593Smuzhiyun
2052*4882a593Smuzhiyun spin_lock(&udc->lock);
2053*4882a593Smuzhiyun
2054*4882a593Smuzhiyun usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2055*4882a593Smuzhiyun ENETDMAC_IR_REG, iudma->ch_idx);
2056*4882a593Smuzhiyun bep = iudma->bep;
2057*4882a593Smuzhiyun rc = iudma_read(udc, iudma);
2058*4882a593Smuzhiyun
2059*4882a593Smuzhiyun /* special handling for EP0 RX (0) and TX (1) */
2060*4882a593Smuzhiyun if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2061*4882a593Smuzhiyun iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2062*4882a593Smuzhiyun req = udc->ep0_request;
2063*4882a593Smuzhiyun breq = our_req(req);
2064*4882a593Smuzhiyun
2065*4882a593Smuzhiyun /* a single request could require multiple submissions */
2066*4882a593Smuzhiyun if (rc >= 0) {
2067*4882a593Smuzhiyun req->actual += rc;
2068*4882a593Smuzhiyun
2069*4882a593Smuzhiyun if (req->actual >= req->length || breq->bd_bytes > rc) {
2070*4882a593Smuzhiyun udc->ep0_req_completed = 1;
2071*4882a593Smuzhiyun is_done = true;
2072*4882a593Smuzhiyun schedule_work(&udc->ep0_wq);
2073*4882a593Smuzhiyun
2074*4882a593Smuzhiyun /* "actual" on a ZLP is 1 byte */
2075*4882a593Smuzhiyun req->actual = min(req->actual, req->length);
2076*4882a593Smuzhiyun } else {
2077*4882a593Smuzhiyun /* queue up the next BD (same request) */
2078*4882a593Smuzhiyun iudma_write(udc, iudma, breq);
2079*4882a593Smuzhiyun }
2080*4882a593Smuzhiyun }
2081*4882a593Smuzhiyun } else if (!list_empty(&bep->queue)) {
2082*4882a593Smuzhiyun breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
2083*4882a593Smuzhiyun req = &breq->req;
2084*4882a593Smuzhiyun
2085*4882a593Smuzhiyun if (rc >= 0) {
2086*4882a593Smuzhiyun req->actual += rc;
2087*4882a593Smuzhiyun
2088*4882a593Smuzhiyun if (req->actual >= req->length || breq->bd_bytes > rc) {
2089*4882a593Smuzhiyun is_done = true;
2090*4882a593Smuzhiyun list_del(&breq->queue);
2091*4882a593Smuzhiyun
2092*4882a593Smuzhiyun req->actual = min(req->actual, req->length);
2093*4882a593Smuzhiyun
2094*4882a593Smuzhiyun if (!list_empty(&bep->queue)) {
2095*4882a593Smuzhiyun struct bcm63xx_req *next;
2096*4882a593Smuzhiyun
2097*4882a593Smuzhiyun next = list_first_entry(&bep->queue,
2098*4882a593Smuzhiyun struct bcm63xx_req, queue);
2099*4882a593Smuzhiyun iudma_write(udc, iudma, next);
2100*4882a593Smuzhiyun }
2101*4882a593Smuzhiyun } else {
2102*4882a593Smuzhiyun iudma_write(udc, iudma, breq);
2103*4882a593Smuzhiyun }
2104*4882a593Smuzhiyun }
2105*4882a593Smuzhiyun }
2106*4882a593Smuzhiyun spin_unlock(&udc->lock);
2107*4882a593Smuzhiyun
2108*4882a593Smuzhiyun if (is_done) {
2109*4882a593Smuzhiyun usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2110*4882a593Smuzhiyun if (req->complete)
2111*4882a593Smuzhiyun req->complete(&bep->ep, req);
2112*4882a593Smuzhiyun }
2113*4882a593Smuzhiyun
2114*4882a593Smuzhiyun return IRQ_HANDLED;
2115*4882a593Smuzhiyun }
2116*4882a593Smuzhiyun
2117*4882a593Smuzhiyun /***********************************************************************
2118*4882a593Smuzhiyun * Debug filesystem
2119*4882a593Smuzhiyun ***********************************************************************/
2120*4882a593Smuzhiyun
2121*4882a593Smuzhiyun /*
2122*4882a593Smuzhiyun * bcm63xx_usbd_dbg_show - Show USBD controller state.
2123*4882a593Smuzhiyun * @s: seq_file to which the information will be written.
2124*4882a593Smuzhiyun * @p: Unused.
2125*4882a593Smuzhiyun *
2126*4882a593Smuzhiyun * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2127*4882a593Smuzhiyun */
bcm63xx_usbd_dbg_show(struct seq_file * s,void * p)2128*4882a593Smuzhiyun static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2129*4882a593Smuzhiyun {
2130*4882a593Smuzhiyun struct bcm63xx_udc *udc = s->private;
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun if (!udc->driver)
2133*4882a593Smuzhiyun return -ENODEV;
2134*4882a593Smuzhiyun
2135*4882a593Smuzhiyun seq_printf(s, "ep0 state: %s\n",
2136*4882a593Smuzhiyun bcm63xx_ep0_state_names[udc->ep0state]);
2137*4882a593Smuzhiyun seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2138*4882a593Smuzhiyun udc->ep0_req_reset ? "reset " : "",
2139*4882a593Smuzhiyun udc->ep0_req_set_cfg ? "set_cfg " : "",
2140*4882a593Smuzhiyun udc->ep0_req_set_iface ? "set_iface " : "",
2141*4882a593Smuzhiyun udc->ep0_req_shutdown ? "shutdown " : "",
2142*4882a593Smuzhiyun udc->ep0_request ? "pending " : "",
2143*4882a593Smuzhiyun udc->ep0_req_completed ? "completed " : "",
2144*4882a593Smuzhiyun udc->ep0_reply ? "reply " : "");
2145*4882a593Smuzhiyun seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2146*4882a593Smuzhiyun udc->cfg, udc->iface, udc->alt_iface);
2147*4882a593Smuzhiyun seq_printf(s, "regs:\n");
2148*4882a593Smuzhiyun seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2149*4882a593Smuzhiyun usbd_readl(udc, USBD_CONTROL_REG),
2150*4882a593Smuzhiyun usbd_readl(udc, USBD_STRAPS_REG),
2151*4882a593Smuzhiyun usbd_readl(udc, USBD_STATUS_REG));
2152*4882a593Smuzhiyun seq_printf(s, " events: %08x; stall: %08x\n",
2153*4882a593Smuzhiyun usbd_readl(udc, USBD_EVENTS_REG),
2154*4882a593Smuzhiyun usbd_readl(udc, USBD_STALL_REG));
2155*4882a593Smuzhiyun
2156*4882a593Smuzhiyun return 0;
2157*4882a593Smuzhiyun }
2158*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun /*
2161*4882a593Smuzhiyun * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2162*4882a593Smuzhiyun * @s: seq_file to which the information will be written.
2163*4882a593Smuzhiyun * @p: Unused.
2164*4882a593Smuzhiyun *
2165*4882a593Smuzhiyun * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2166*4882a593Smuzhiyun */
bcm63xx_iudma_dbg_show(struct seq_file * s,void * p)2167*4882a593Smuzhiyun static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2168*4882a593Smuzhiyun {
2169*4882a593Smuzhiyun struct bcm63xx_udc *udc = s->private;
2170*4882a593Smuzhiyun int ch_idx, i;
2171*4882a593Smuzhiyun u32 sram2, sram3;
2172*4882a593Smuzhiyun
2173*4882a593Smuzhiyun if (!udc->driver)
2174*4882a593Smuzhiyun return -ENODEV;
2175*4882a593Smuzhiyun
2176*4882a593Smuzhiyun for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2177*4882a593Smuzhiyun struct iudma_ch *iudma = &udc->iudma[ch_idx];
2178*4882a593Smuzhiyun struct list_head *pos;
2179*4882a593Smuzhiyun
2180*4882a593Smuzhiyun seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2181*4882a593Smuzhiyun switch (iudma_defaults[ch_idx].ep_type) {
2182*4882a593Smuzhiyun case BCMEP_CTRL:
2183*4882a593Smuzhiyun seq_printf(s, "control");
2184*4882a593Smuzhiyun break;
2185*4882a593Smuzhiyun case BCMEP_BULK:
2186*4882a593Smuzhiyun seq_printf(s, "bulk");
2187*4882a593Smuzhiyun break;
2188*4882a593Smuzhiyun case BCMEP_INTR:
2189*4882a593Smuzhiyun seq_printf(s, "interrupt");
2190*4882a593Smuzhiyun break;
2191*4882a593Smuzhiyun }
2192*4882a593Smuzhiyun seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2193*4882a593Smuzhiyun seq_printf(s, " [ep%d]:\n",
2194*4882a593Smuzhiyun max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2195*4882a593Smuzhiyun seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2196*4882a593Smuzhiyun usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2197*4882a593Smuzhiyun usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2198*4882a593Smuzhiyun usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2199*4882a593Smuzhiyun usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2200*4882a593Smuzhiyun
2201*4882a593Smuzhiyun sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2202*4882a593Smuzhiyun sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2203*4882a593Smuzhiyun seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2204*4882a593Smuzhiyun usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2205*4882a593Smuzhiyun sram2 >> 16, sram2 & 0xffff,
2206*4882a593Smuzhiyun sram3 >> 16, sram3 & 0xffff,
2207*4882a593Smuzhiyun usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2208*4882a593Smuzhiyun seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2209*4882a593Smuzhiyun iudma->n_bds);
2210*4882a593Smuzhiyun
2211*4882a593Smuzhiyun if (iudma->bep) {
2212*4882a593Smuzhiyun i = 0;
2213*4882a593Smuzhiyun list_for_each(pos, &iudma->bep->queue)
2214*4882a593Smuzhiyun i++;
2215*4882a593Smuzhiyun seq_printf(s, "; %d queued\n", i);
2216*4882a593Smuzhiyun } else {
2217*4882a593Smuzhiyun seq_printf(s, "\n");
2218*4882a593Smuzhiyun }
2219*4882a593Smuzhiyun
2220*4882a593Smuzhiyun for (i = 0; i < iudma->n_bds; i++) {
2221*4882a593Smuzhiyun struct bcm_enet_desc *d = &iudma->bd_ring[i];
2222*4882a593Smuzhiyun
2223*4882a593Smuzhiyun seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2224*4882a593Smuzhiyun i * sizeof(*d), i,
2225*4882a593Smuzhiyun d->len_stat >> 16, d->len_stat & 0xffff,
2226*4882a593Smuzhiyun d->address);
2227*4882a593Smuzhiyun if (d == iudma->read_bd)
2228*4882a593Smuzhiyun seq_printf(s, " <<RD");
2229*4882a593Smuzhiyun if (d == iudma->write_bd)
2230*4882a593Smuzhiyun seq_printf(s, " <<WR");
2231*4882a593Smuzhiyun seq_printf(s, "\n");
2232*4882a593Smuzhiyun }
2233*4882a593Smuzhiyun
2234*4882a593Smuzhiyun seq_printf(s, "\n");
2235*4882a593Smuzhiyun }
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun return 0;
2238*4882a593Smuzhiyun }
2239*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);

/**
 * bcm63xx_udc_init_debugfs - Create debugfs entries.
 * @udc: Reference to the device controller.
 */
static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
{
	struct dentry *root;

	if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
		return;

	root = debugfs_create_dir(udc->gadget.name, usb_debug_root);
	udc->debugfs_root = root;

	debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
	debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);
}
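
/*
 * With CONFIG_USB_GADGET_DEBUG_FS enabled, the two files can be read from
 * a mounted debugfs, e.g. (paths are illustrative):
 *
 *	mount -t debugfs none /sys/kernel/debug
 *	cat /sys/kernel/debug/usb/<gadget name>/usbd
 *	cat /sys/kernel/debug/usb/<gadget name>/iudma
 */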

/**
 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
 * @udc: Reference to the device controller.
 *
 * debugfs_remove_recursive() is safe to call with a NULL argument.
 */
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
	debugfs_remove_recursive(udc->debugfs_root);
}

/***********************************************************************
 * Driver init/exit
 ***********************************************************************/

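/*
 * A minimal sketch of the board-side setup this driver expects. The
 * registration helper and exact field set live in the bcm63xx BSP and
 * may differ between chips; the values below are illustrative only:
 *
 *	static struct bcm63xx_usbd_platform_data usbd_pd = {
 *		.port_no	= 0,	(port to switch into device mode)
 *		.use_fullspeed	= 0,	(nonzero caps the link at full speed)
 *	};
 *
 *	bcm63xx_usbd_register(&usbd_pd);
 */
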
/**
 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Note that platform data is required, because pd.port_no varies from chip
 * to chip and is used to switch the correct USB port to device mode.
 */
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
	struct bcm63xx_udc *udc;
	int rc = -ENOMEM, i, irq;

	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	platform_set_drvdata(pdev, udc);
	udc->dev = dev;
	udc->pd = pd;

	if (!pd) {
		dev_err(dev, "missing platform data\n");
		return -EINVAL;
	}

	udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(udc->usbd_regs))
		return PTR_ERR(udc->usbd_regs);

	udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(udc->iudma_regs))
		return PTR_ERR(udc->iudma_regs);

	spin_lock_init(&udc->lock);
	INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);

	udc->gadget.ops = &bcm63xx_udc_ops;
	udc->gadget.name = dev_name(dev);

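	/*
	 * Full-speed-only operation can be forced either by the board's
	 * platform data or by the use_fullspeed module parameter; otherwise
	 * the controller advertises high speed.
	 */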
	if (!pd->use_fullspeed && !use_fullspeed)
		udc->gadget.max_speed = USB_SPEED_HIGH;
	else
		udc->gadget.max_speed = USB_SPEED_FULL;

	/* request clocks, allocate buffers, and clear any pending IRQs */
	rc = bcm63xx_init_udc_hw(udc);
	if (rc)
		return rc;

	rc = -ENXIO;

	/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto out_uninit;
	if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
			     dev_name(dev), udc) < 0)
		goto report_request_failure;

	/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		irq = platform_get_irq(pdev, i + 1);
		if (irq < 0)
			goto out_uninit;
		if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
				     dev_name(dev), &udc->iudma[i]) < 0)
			goto report_request_failure;
	}

	bcm63xx_udc_init_debugfs(udc);
	rc = usb_add_gadget_udc(dev, &udc->gadget);
	if (!rc)
		return 0;

	bcm63xx_udc_cleanup_debugfs(udc);
out_uninit:
	bcm63xx_uninit_udc_hw(udc);
	return rc;

report_request_failure:
	dev_err(dev, "error requesting IRQ #%d\n", irq);
	goto out_uninit;
}

/**
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 */
static int bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
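	/*
	 * usb_del_gadget_udc() unbinds any gadget driver that is still
	 * attached, so udc->driver must be NULL by this point.
	 */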
	BUG_ON(udc->driver);

	bcm63xx_uninit_udc_hw(udc);

	return 0;
}

static struct platform_driver bcm63xx_udc_driver = {
	.probe		= bcm63xx_udc_probe,
	.remove		= bcm63xx_udc_remove,
	.driver		= {
		.name	= DRV_MODULE_NAME,
	},
};
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);