xref: /OK3568_Linux_fs/kernel/drivers/usb/chipidea/udc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * udc.c - ChipIdea UDC driver
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Author: David Lopo
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/delay.h>
11*4882a593Smuzhiyun #include <linux/device.h>
12*4882a593Smuzhiyun #include <linux/dmapool.h>
13*4882a593Smuzhiyun #include <linux/err.h>
14*4882a593Smuzhiyun #include <linux/irqreturn.h>
15*4882a593Smuzhiyun #include <linux/kernel.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/pm_runtime.h>
18*4882a593Smuzhiyun #include <linux/pinctrl/consumer.h>
19*4882a593Smuzhiyun #include <linux/usb/ch9.h>
20*4882a593Smuzhiyun #include <linux/usb/gadget.h>
21*4882a593Smuzhiyun #include <linux/usb/otg-fsm.h>
22*4882a593Smuzhiyun #include <linux/usb/chipidea.h>
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #include "ci.h"
25*4882a593Smuzhiyun #include "udc.h"
26*4882a593Smuzhiyun #include "bits.h"
27*4882a593Smuzhiyun #include "otg.h"
28*4882a593Smuzhiyun #include "otg_fsm.h"
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun /* control endpoint description */
31*4882a593Smuzhiyun static const struct usb_endpoint_descriptor
32*4882a593Smuzhiyun ctrl_endpt_out_desc = {
33*4882a593Smuzhiyun 	.bLength         = USB_DT_ENDPOINT_SIZE,
34*4882a593Smuzhiyun 	.bDescriptorType = USB_DT_ENDPOINT,
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	.bEndpointAddress = USB_DIR_OUT,
37*4882a593Smuzhiyun 	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
38*4882a593Smuzhiyun 	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
39*4882a593Smuzhiyun };
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun static const struct usb_endpoint_descriptor
42*4882a593Smuzhiyun ctrl_endpt_in_desc = {
43*4882a593Smuzhiyun 	.bLength         = USB_DT_ENDPOINT_SIZE,
44*4882a593Smuzhiyun 	.bDescriptorType = USB_DT_ENDPOINT,
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 	.bEndpointAddress = USB_DIR_IN,
47*4882a593Smuzhiyun 	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
48*4882a593Smuzhiyun 	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
49*4882a593Smuzhiyun };
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun /**
52*4882a593Smuzhiyun  * hw_ep_bit: calculates the bit number
53*4882a593Smuzhiyun  * @num: endpoint number
54*4882a593Smuzhiyun  * @dir: endpoint direction
55*4882a593Smuzhiyun  *
56*4882a593Smuzhiyun  * This function returns the bit number
57*4882a593Smuzhiyun  */
58*4882a593Smuzhiyun static inline int hw_ep_bit(int num, int dir)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	return num + ((dir == TX) ? 16 : 0);
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
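/*
 * ep_to_bit: map a logical endpoint index to its bit in the ENDPTxxx
 * registers. The hardware packs RX endpoints into bits 0..15 and TX
 * endpoints into bits 16..31, so indices in the TX half of the endpoint
 * array (n >= hw_ep_max / 2) are shifted up into the upper 16 bits.
 */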
63*4882a593Smuzhiyun static inline int ep_to_bit(struct ci_hdrc *ci, int n)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun 	int fill = 16 - ci->hw_ep_max / 2;
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 	if (n >= ci->hw_ep_max / 2)
68*4882a593Smuzhiyun 		n += fill;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	return n;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /**
74*4882a593Smuzhiyun  * hw_device_state: enables/disables interrupts (execute without interruption)
75*4882a593Smuzhiyun  * @ci: the controller
76*4882a593Smuzhiyun  * @dma: 0 => disable interrupts, !0 => enable them and use it as the endpoint list address
77*4882a593Smuzhiyun  *
78*4882a593Smuzhiyun  * This function returns an error code
79*4882a593Smuzhiyun  */
80*4882a593Smuzhiyun static int hw_device_state(struct ci_hdrc *ci, u32 dma)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun 	if (dma) {
83*4882a593Smuzhiyun 		hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
84*4882a593Smuzhiyun 		/* interrupt, error, port change, reset, sleep/suspend */
85*4882a593Smuzhiyun 		hw_write(ci, OP_USBINTR, ~0,
86*4882a593Smuzhiyun 			     USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
87*4882a593Smuzhiyun 	} else {
88*4882a593Smuzhiyun 		hw_write(ci, OP_USBINTR, ~0, 0);
89*4882a593Smuzhiyun 	}
90*4882a593Smuzhiyun 	return 0;
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun /**
94*4882a593Smuzhiyun  * hw_ep_flush: flush endpoint fifo (execute without interruption)
95*4882a593Smuzhiyun  * @ci: the controller
96*4882a593Smuzhiyun  * @num: endpoint number
97*4882a593Smuzhiyun  * @dir: endpoint direction
98*4882a593Smuzhiyun  *
99*4882a593Smuzhiyun  * This function returns an error code
100*4882a593Smuzhiyun  */
101*4882a593Smuzhiyun static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun 	int n = hw_ep_bit(num, dir);
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	do {
106*4882a593Smuzhiyun 		/* flush any pending transfer */
107*4882a593Smuzhiyun 		hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
108*4882a593Smuzhiyun 		while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
109*4882a593Smuzhiyun 			cpu_relax();
110*4882a593Smuzhiyun 	} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	return 0;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun /**
116*4882a593Smuzhiyun  * hw_ep_disable: disables endpoint (execute without interruption)
117*4882a593Smuzhiyun  * @ci: the controller
118*4882a593Smuzhiyun  * @num: endpoint number
119*4882a593Smuzhiyun  * @dir: endpoint direction
120*4882a593Smuzhiyun  *
121*4882a593Smuzhiyun  * This function returns an error code
122*4882a593Smuzhiyun  */
123*4882a593Smuzhiyun static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
124*4882a593Smuzhiyun {
125*4882a593Smuzhiyun 	hw_write(ci, OP_ENDPTCTRL + num,
126*4882a593Smuzhiyun 		 (dir == TX) ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
127*4882a593Smuzhiyun 	return 0;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun /**
131*4882a593Smuzhiyun  * hw_ep_enable: enables endpoint (execute without interruption)
132*4882a593Smuzhiyun  * @ci: the controller
133*4882a593Smuzhiyun  * @num:  endpoint number
134*4882a593Smuzhiyun  * @dir:  endpoint direction
135*4882a593Smuzhiyun  * @type: endpoint type
136*4882a593Smuzhiyun  *
137*4882a593Smuzhiyun  * This function returns an error code
138*4882a593Smuzhiyun  */
139*4882a593Smuzhiyun static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun 	u32 mask, data;
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	if (dir == TX) {
144*4882a593Smuzhiyun 		mask  = ENDPTCTRL_TXT;  /* type    */
145*4882a593Smuzhiyun 		data  = type << __ffs(mask);
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 		mask |= ENDPTCTRL_TXS;  /* unstall */
148*4882a593Smuzhiyun 		mask |= ENDPTCTRL_TXR;  /* reset data toggle */
149*4882a593Smuzhiyun 		data |= ENDPTCTRL_TXR;
150*4882a593Smuzhiyun 		mask |= ENDPTCTRL_TXE;  /* enable  */
151*4882a593Smuzhiyun 		data |= ENDPTCTRL_TXE;
152*4882a593Smuzhiyun 	} else {
153*4882a593Smuzhiyun 		mask  = ENDPTCTRL_RXT;  /* type    */
154*4882a593Smuzhiyun 		data  = type << __ffs(mask);
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun 		mask |= ENDPTCTRL_RXS;  /* unstall */
157*4882a593Smuzhiyun 		mask |= ENDPTCTRL_RXR;  /* reset data toggle */
158*4882a593Smuzhiyun 		data |= ENDPTCTRL_RXR;
159*4882a593Smuzhiyun 		mask |= ENDPTCTRL_RXE;  /* enable  */
160*4882a593Smuzhiyun 		data |= ENDPTCTRL_RXE;
161*4882a593Smuzhiyun 	}
162*4882a593Smuzhiyun 	hw_write(ci, OP_ENDPTCTRL + num, mask, data);
163*4882a593Smuzhiyun 	return 0;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun /**
167*4882a593Smuzhiyun  * hw_ep_get_halt: return endpoint halt status
168*4882a593Smuzhiyun  * @ci: the controller
169*4882a593Smuzhiyun  * @num: endpoint number
170*4882a593Smuzhiyun  * @dir: endpoint direction
171*4882a593Smuzhiyun  *
172*4882a593Smuzhiyun  * This function returns 1 if the endpoint is halted
173*4882a593Smuzhiyun  */
174*4882a593Smuzhiyun static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun 	u32 mask = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun /**
182*4882a593Smuzhiyun  * hw_ep_prime: primes endpoint (execute without interruption)
183*4882a593Smuzhiyun  * @ci: the controller
184*4882a593Smuzhiyun  * @num:     endpoint number
185*4882a593Smuzhiyun  * @dir:     endpoint direction
186*4882a593Smuzhiyun  * @is_ctrl: true if control endpoint
187*4882a593Smuzhiyun  *
188*4882a593Smuzhiyun  * This function returns an error code
189*4882a593Smuzhiyun  */
190*4882a593Smuzhiyun static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun 	int n = hw_ep_bit(num, dir);
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	/* Synchronize before ep prime */
195*4882a593Smuzhiyun 	wmb();
196*4882a593Smuzhiyun 
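	/*
	 * If a setup packet is already pending on this control endpoint,
	 * bail out with -EAGAIN so the setup handler runs before we prime.
	 */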
197*4882a593Smuzhiyun 	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
198*4882a593Smuzhiyun 		return -EAGAIN;
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
203*4882a593Smuzhiyun 		cpu_relax();
204*4882a593Smuzhiyun 	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
205*4882a593Smuzhiyun 		return -EAGAIN;
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	/* status should be tested according to the manual, but it doesn't work */
208*4882a593Smuzhiyun 	return 0;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun /**
212*4882a593Smuzhiyun  * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
213*4882a593Smuzhiyun  *                 without interruption)
214*4882a593Smuzhiyun  * @ci: the controller
215*4882a593Smuzhiyun  * @num:   endpoint number
216*4882a593Smuzhiyun  * @dir:   endpoint direction
217*4882a593Smuzhiyun  * @value: true => stall, false => unstall
218*4882a593Smuzhiyun  *
219*4882a593Smuzhiyun  * This function returns an error code
220*4882a593Smuzhiyun  */
221*4882a593Smuzhiyun static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
222*4882a593Smuzhiyun {
223*4882a593Smuzhiyun 	if (value != 0 && value != 1)
224*4882a593Smuzhiyun 		return -EINVAL;
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	do {
227*4882a593Smuzhiyun 		enum ci_hw_regs reg = OP_ENDPTCTRL + num;
228*4882a593Smuzhiyun 		u32 mask_xs = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
229*4882a593Smuzhiyun 		u32 mask_xr = (dir == TX) ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun 		/* data toggle - reserved for EP0 but it's in ESS */
232*4882a593Smuzhiyun 		hw_write(ci, reg, mask_xs|mask_xr,
233*4882a593Smuzhiyun 			  value ? mask_xs : mask_xr);
234*4882a593Smuzhiyun 	} while (value != hw_ep_get_halt(ci, num, dir));
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	return 0;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun /**
240*4882a593Smuzhiyun  * hw_port_is_high_speed: test if port is high speed
241*4882a593Smuzhiyun  * @ci: the controller
242*4882a593Smuzhiyun  *
243*4882a593Smuzhiyun  * This function returns true if the port is high speed
244*4882a593Smuzhiyun  */
245*4882a593Smuzhiyun static int hw_port_is_high_speed(struct ci_hdrc *ci)
246*4882a593Smuzhiyun {
247*4882a593Smuzhiyun 	return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
248*4882a593Smuzhiyun 		hw_read(ci, OP_PORTSC, PORTSC_HSP);
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun /**
252*4882a593Smuzhiyun  * hw_test_and_clear_complete: test & clear complete status (execute without
253*4882a593Smuzhiyun  *                             interruption)
254*4882a593Smuzhiyun  * @ci: the controller
255*4882a593Smuzhiyun  * @n: endpoint number
256*4882a593Smuzhiyun  *
257*4882a593Smuzhiyun  * This function returns complete status
258*4882a593Smuzhiyun  */
259*4882a593Smuzhiyun static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
260*4882a593Smuzhiyun {
261*4882a593Smuzhiyun 	n = ep_to_bit(ci, n);
262*4882a593Smuzhiyun 	return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun /**
266*4882a593Smuzhiyun  * hw_test_and_clear_intr_active: test & clear active interrupts (execute
267*4882a593Smuzhiyun  *                                without interruption)
268*4882a593Smuzhiyun  * @ci: the controller
269*4882a593Smuzhiyun  *
270*4882a593Smuzhiyun  * This function returns active interrupts
271*4882a593Smuzhiyun  */
272*4882a593Smuzhiyun static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
273*4882a593Smuzhiyun {
274*4882a593Smuzhiyun 	u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	hw_write(ci, OP_USBSTS, ~0, reg);
277*4882a593Smuzhiyun 	return reg;
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun /**
281*4882a593Smuzhiyun  * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
282*4882a593Smuzhiyun  *                                interruption)
283*4882a593Smuzhiyun  * @ci: the controller
284*4882a593Smuzhiyun  *
285*4882a593Smuzhiyun  * This function returns guard value
286*4882a593Smuzhiyun  */
287*4882a593Smuzhiyun static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
288*4882a593Smuzhiyun {
289*4882a593Smuzhiyun 	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun /**
293*4882a593Smuzhiyun  * hw_test_and_set_setup_guard: test & set setup guard (execute without
294*4882a593Smuzhiyun  *                              interruption)
295*4882a593Smuzhiyun  * @ci: the controller
296*4882a593Smuzhiyun  *
297*4882a593Smuzhiyun  * This function returns guard value
298*4882a593Smuzhiyun  */
299*4882a593Smuzhiyun static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
300*4882a593Smuzhiyun {
301*4882a593Smuzhiyun 	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun /**
305*4882a593Smuzhiyun  * hw_usb_set_address: configures USB address (execute without interruption)
306*4882a593Smuzhiyun  * @ci: the controller
307*4882a593Smuzhiyun  * @value: new USB address
308*4882a593Smuzhiyun  *
309*4882a593Smuzhiyun  * This function explicitly sets the address, without the "USBADRA" (advance)
310*4882a593Smuzhiyun  * feature, which is not supported by older versions of the controller.
311*4882a593Smuzhiyun  */
312*4882a593Smuzhiyun static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
313*4882a593Smuzhiyun {
314*4882a593Smuzhiyun 	hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
315*4882a593Smuzhiyun 		 value << __ffs(DEVICEADDR_USBADR));
316*4882a593Smuzhiyun }
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun /**
319*4882a593Smuzhiyun  * hw_usb_reset: restart device after a bus reset (execute without
320*4882a593Smuzhiyun  *               interruption)
321*4882a593Smuzhiyun  * @ci: the controller
322*4882a593Smuzhiyun  *
323*4882a593Smuzhiyun  * This function returns an error code
324*4882a593Smuzhiyun  */
325*4882a593Smuzhiyun static int hw_usb_reset(struct ci_hdrc *ci)
326*4882a593Smuzhiyun {
327*4882a593Smuzhiyun 	hw_usb_set_address(ci, 0);
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 	/* ESS flushes only at end?!? */
330*4882a593Smuzhiyun 	hw_write(ci, OP_ENDPTFLUSH,    ~0, ~0);
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	/* clear setup token semaphores */
333*4882a593Smuzhiyun 	hw_write(ci, OP_ENDPTSETUPSTAT, 0,  0);
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	/* clear complete status */
336*4882a593Smuzhiyun 	hw_write(ci, OP_ENDPTCOMPLETE,  0,  0);
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	/* wait until all bits cleared */
339*4882a593Smuzhiyun 	while (hw_read(ci, OP_ENDPTPRIME, ~0))
340*4882a593Smuzhiyun 		udelay(10);             /* not RTOS friendly */
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	/* reset all endpoints ? */
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	/* reset internal status and wait for further instructions
345*4882a593Smuzhiyun 	   no need to verify the port reset status (ESS does it) */
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	return 0;
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun /******************************************************************************
351*4882a593Smuzhiyun  * UTIL block
352*4882a593Smuzhiyun  *****************************************************************************/
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
355*4882a593Smuzhiyun 			unsigned int length, struct scatterlist *s)
356*4882a593Smuzhiyun {
357*4882a593Smuzhiyun 	int i;
358*4882a593Smuzhiyun 	u32 temp;
359*4882a593Smuzhiyun 	struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
360*4882a593Smuzhiyun 						  GFP_ATOMIC);
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	if (node == NULL)
363*4882a593Smuzhiyun 		return -ENOMEM;
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
366*4882a593Smuzhiyun 	if (node->ptr == NULL) {
367*4882a593Smuzhiyun 		kfree(node);
368*4882a593Smuzhiyun 		return -ENOMEM;
369*4882a593Smuzhiyun 	}
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
372*4882a593Smuzhiyun 	node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
373*4882a593Smuzhiyun 	node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
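	/*
	 * For isochronous IN endpoints, program the dTD MultO field with the
	 * number of max-packet-sized packets the request occupies (rounded up
	 * when the length is zero or not a multiple of maxpacket).
	 */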
374*4882a593Smuzhiyun 	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
375*4882a593Smuzhiyun 		u32 mul = hwreq->req.length / hwep->ep.maxpacket;
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 		if (hwreq->req.length == 0
378*4882a593Smuzhiyun 				|| hwreq->req.length % hwep->ep.maxpacket)
379*4882a593Smuzhiyun 			mul++;
380*4882a593Smuzhiyun 		node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
381*4882a593Smuzhiyun 	}
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	if (s) {
384*4882a593Smuzhiyun 		temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
385*4882a593Smuzhiyun 		node->td_remaining_size = CI_MAX_BUF_SIZE - length;
386*4882a593Smuzhiyun 	} else {
387*4882a593Smuzhiyun 		temp = (u32) (hwreq->req.dma + hwreq->req.actual);
388*4882a593Smuzhiyun 	}
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 	if (length) {
391*4882a593Smuzhiyun 		node->ptr->page[0] = cpu_to_le32(temp);
392*4882a593Smuzhiyun 		for (i = 1; i < TD_PAGE_COUNT; i++) {
393*4882a593Smuzhiyun 			u32 page = temp + i * CI_HDRC_PAGE_SIZE;
394*4882a593Smuzhiyun 			page &= ~TD_RESERVED_MASK;
395*4882a593Smuzhiyun 			node->ptr->page[i] = cpu_to_le32(page);
396*4882a593Smuzhiyun 		}
397*4882a593Smuzhiyun 	}
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 	hwreq->req.actual += length;
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun 	if (!list_empty(&hwreq->tds)) {
402*4882a593Smuzhiyun 		/* get the last entry */
403*4882a593Smuzhiyun 		lastnode = list_entry(hwreq->tds.prev,
404*4882a593Smuzhiyun 				struct td_node, td);
405*4882a593Smuzhiyun 		lastnode->ptr->next = cpu_to_le32(node->dma);
406*4882a593Smuzhiyun 	}
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	INIT_LIST_HEAD(&node->td);
409*4882a593Smuzhiyun 	list_add_tail(&node->td, &hwreq->tds);
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun 	return 0;
412*4882a593Smuzhiyun }
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun /**
415*4882a593Smuzhiyun  * _usb_addr: calculates endpoint address from direction & number
416*4882a593Smuzhiyun  * @ep:  endpoint
417*4882a593Smuzhiyun  */
418*4882a593Smuzhiyun static inline u8 _usb_addr(struct ci_hw_ep *ep)
419*4882a593Smuzhiyun {
420*4882a593Smuzhiyun 	return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
424*4882a593Smuzhiyun 		struct ci_hw_req *hwreq)
425*4882a593Smuzhiyun {
426*4882a593Smuzhiyun 	unsigned int rest = hwreq->req.length;
427*4882a593Smuzhiyun 	int pages = TD_PAGE_COUNT;
428*4882a593Smuzhiyun 	int ret = 0;
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	if (rest == 0) {
431*4882a593Smuzhiyun 		ret = add_td_to_list(hwep, hwreq, 0, NULL);
432*4882a593Smuzhiyun 		if (ret < 0)
433*4882a593Smuzhiyun 			return ret;
434*4882a593Smuzhiyun 	}
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 	/*
437*4882a593Smuzhiyun 	 * The first buffer might not be page aligned.
438*4882a593Smuzhiyun 	 * In that case the transfer has to span one extra td.
439*4882a593Smuzhiyun 	 */
440*4882a593Smuzhiyun 	if (hwreq->req.dma % PAGE_SIZE)
441*4882a593Smuzhiyun 		pages--;
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun 	while (rest > 0) {
444*4882a593Smuzhiyun 		unsigned int count = min(hwreq->req.length - hwreq->req.actual,
445*4882a593Smuzhiyun 			(unsigned int)(pages * CI_HDRC_PAGE_SIZE));
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun 		ret = add_td_to_list(hwep, hwreq, count, NULL);
448*4882a593Smuzhiyun 		if (ret < 0)
449*4882a593Smuzhiyun 			return ret;
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 		rest -= count;
452*4882a593Smuzhiyun 	}
453*4882a593Smuzhiyun 
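	/*
	 * If the gadget requested a trailing zero-length packet and the
	 * transfer length is an exact multiple of maxpacket, append an empty
	 * td so the host sees the transfer terminate.
	 */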
454*4882a593Smuzhiyun 	if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
455*4882a593Smuzhiyun 	    && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
456*4882a593Smuzhiyun 		ret = add_td_to_list(hwep, hwreq, 0, NULL);
457*4882a593Smuzhiyun 		if (ret < 0)
458*4882a593Smuzhiyun 			return ret;
459*4882a593Smuzhiyun 	}
460*4882a593Smuzhiyun 
461*4882a593Smuzhiyun 	return ret;
462*4882a593Smuzhiyun }
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
465*4882a593Smuzhiyun 		struct scatterlist *s)
466*4882a593Smuzhiyun {
467*4882a593Smuzhiyun 	unsigned int rest = sg_dma_len(s);
468*4882a593Smuzhiyun 	int ret = 0;
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 	hwreq->req.actual = 0;
471*4882a593Smuzhiyun 	while (rest > 0) {
472*4882a593Smuzhiyun 		unsigned int count = min_t(unsigned int, rest,
473*4882a593Smuzhiyun 				CI_MAX_BUF_SIZE);
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 		ret = add_td_to_list(hwep, hwreq, count, s);
476*4882a593Smuzhiyun 		if (ret < 0)
477*4882a593Smuzhiyun 			return ret;
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun 		rest -= count;
480*4882a593Smuzhiyun 	}
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	return ret;
483*4882a593Smuzhiyun }
484*4882a593Smuzhiyun 
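/*
 * ci_add_buffer_entry: fold another page-aligned sg entry into the unused
 * buffer-pointer slots of an existing td and grow its total-bytes field,
 * instead of allocating a new td for it.
 */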
485*4882a593Smuzhiyun static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
486*4882a593Smuzhiyun {
487*4882a593Smuzhiyun 	int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
488*4882a593Smuzhiyun 			/ CI_HDRC_PAGE_SIZE;
489*4882a593Smuzhiyun 	int i;
490*4882a593Smuzhiyun 	u32 token;
491*4882a593Smuzhiyun 
492*4882a593Smuzhiyun 	token = le32_to_cpu(node->ptr->token) + (sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));
493*4882a593Smuzhiyun 	node->ptr->token = cpu_to_le32(token);
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	for (i = empty_td_slot_index; i < TD_PAGE_COUNT; i++) {
496*4882a593Smuzhiyun 		u32 page = (u32) sg_dma_address(s) +
497*4882a593Smuzhiyun 			(i - empty_td_slot_index) * CI_HDRC_PAGE_SIZE;
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun 		page &= ~TD_RESERVED_MASK;
500*4882a593Smuzhiyun 		node->ptr->page[i] = cpu_to_le32(page);
501*4882a593Smuzhiyun 	}
502*4882a593Smuzhiyun }
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
505*4882a593Smuzhiyun {
506*4882a593Smuzhiyun 	struct usb_request *req = &hwreq->req;
507*4882a593Smuzhiyun 	struct scatterlist *s = req->sg;
508*4882a593Smuzhiyun 	int ret = 0, i = 0;
509*4882a593Smuzhiyun 	struct td_node *node = NULL;
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun 	if (!s || req->zero || req->length == 0) {
512*4882a593Smuzhiyun 		dev_err(hwep->ci->dev, "not supported operation for sg\n");
513*4882a593Smuzhiyun 		return -EINVAL;
514*4882a593Smuzhiyun 	}
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 	while (i++ < req->num_mapped_sgs) {
517*4882a593Smuzhiyun 		if (sg_dma_address(s) % PAGE_SIZE) {
518*4882a593Smuzhiyun 			dev_err(hwep->ci->dev, "not page aligned sg buffer\n");
519*4882a593Smuzhiyun 			return -EINVAL;
520*4882a593Smuzhiyun 		}
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun 		if (node && (node->td_remaining_size >= sg_dma_len(s))) {
523*4882a593Smuzhiyun 			ci_add_buffer_entry(node, s);
524*4882a593Smuzhiyun 			node->td_remaining_size -= sg_dma_len(s);
525*4882a593Smuzhiyun 		} else {
526*4882a593Smuzhiyun 			ret = prepare_td_per_sg(hwep, hwreq, s);
527*4882a593Smuzhiyun 			if (ret)
528*4882a593Smuzhiyun 				return ret;
529*4882a593Smuzhiyun 
530*4882a593Smuzhiyun 			node = list_entry(hwreq->tds.prev,
531*4882a593Smuzhiyun 				struct td_node, td);
532*4882a593Smuzhiyun 		}
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 		s = sg_next(s);
535*4882a593Smuzhiyun 	}
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	return ret;
538*4882a593Smuzhiyun }
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun /**
541*4882a593Smuzhiyun  * _hardware_enqueue: configures a request at hardware level
542*4882a593Smuzhiyun  * @hwep:   endpoint
543*4882a593Smuzhiyun  * @hwreq:  request
544*4882a593Smuzhiyun  *
545*4882a593Smuzhiyun  * This function returns an error code
546*4882a593Smuzhiyun  */
547*4882a593Smuzhiyun static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
548*4882a593Smuzhiyun {
549*4882a593Smuzhiyun 	struct ci_hdrc *ci = hwep->ci;
550*4882a593Smuzhiyun 	int ret = 0;
551*4882a593Smuzhiyun 	struct td_node *firstnode, *lastnode;
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	/* don't queue twice */
554*4882a593Smuzhiyun 	if (hwreq->req.status == -EALREADY)
555*4882a593Smuzhiyun 		return -EALREADY;
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 	hwreq->req.status = -EALREADY;
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun 	ret = usb_gadget_map_request_by_dev(ci->dev->parent,
560*4882a593Smuzhiyun 					    &hwreq->req, hwep->dir);
561*4882a593Smuzhiyun 	if (ret)
562*4882a593Smuzhiyun 		return ret;
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	if (hwreq->req.num_mapped_sgs)
565*4882a593Smuzhiyun 		ret = prepare_td_for_sg(hwep, hwreq);
566*4882a593Smuzhiyun 	else
567*4882a593Smuzhiyun 		ret = prepare_td_for_non_sg(hwep, hwreq);
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun 	if (ret)
570*4882a593Smuzhiyun 		return ret;
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun 	firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
573*4882a593Smuzhiyun 
574*4882a593Smuzhiyun 	lastnode = list_entry(hwreq->tds.prev,
575*4882a593Smuzhiyun 		struct td_node, td);
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
578*4882a593Smuzhiyun 	if (!hwreq->req.no_interrupt)
579*4882a593Smuzhiyun 		lastnode->ptr->token |= cpu_to_le32(TD_IOC);
580*4882a593Smuzhiyun 	wmb();
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	hwreq->req.actual = 0;
583*4882a593Smuzhiyun 	if (!list_empty(&hwep->qh.queue)) {
584*4882a593Smuzhiyun 		struct ci_hw_req *hwreqprev;
585*4882a593Smuzhiyun 		int n = hw_ep_bit(hwep->num, hwep->dir);
586*4882a593Smuzhiyun 		int tmp_stat;
587*4882a593Smuzhiyun 		struct td_node *prevlastnode;
588*4882a593Smuzhiyun 		u32 next = firstnode->dma & TD_ADDR_MASK;
589*4882a593Smuzhiyun 
590*4882a593Smuzhiyun 		hwreqprev = list_entry(hwep->qh.queue.prev,
591*4882a593Smuzhiyun 				struct ci_hw_req, queue);
592*4882a593Smuzhiyun 		prevlastnode = list_entry(hwreqprev->tds.prev,
593*4882a593Smuzhiyun 				struct td_node, td);
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 		prevlastnode->ptr->next = cpu_to_le32(next);
596*4882a593Smuzhiyun 		wmb();
597*4882a593Smuzhiyun 		if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
598*4882a593Smuzhiyun 			goto done;
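		/*
		 * Add-dTD tripwire (ATDTW): set the semaphore, sample
		 * ENDPTSTAT, and retry until the controller leaves the bit
		 * set, so the status read is consistent while appending to a
		 * possibly active list.
		 */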
599*4882a593Smuzhiyun 		do {
600*4882a593Smuzhiyun 			hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
601*4882a593Smuzhiyun 			tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
602*4882a593Smuzhiyun 		} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
603*4882a593Smuzhiyun 		hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
604*4882a593Smuzhiyun 		if (tmp_stat)
605*4882a593Smuzhiyun 			goto done;
606*4882a593Smuzhiyun 	}
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun 	/*  QH configuration */
609*4882a593Smuzhiyun 	hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
610*4882a593Smuzhiyun 	hwep->qh.ptr->td.token &=
611*4882a593Smuzhiyun 		cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun 	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
614*4882a593Smuzhiyun 		u32 mul = hwreq->req.length / hwep->ep.maxpacket;
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun 		if (hwreq->req.length == 0
617*4882a593Smuzhiyun 				|| hwreq->req.length % hwep->ep.maxpacket)
618*4882a593Smuzhiyun 			mul++;
619*4882a593Smuzhiyun 		hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
620*4882a593Smuzhiyun 	}
621*4882a593Smuzhiyun 
622*4882a593Smuzhiyun 	ret = hw_ep_prime(ci, hwep->num, hwep->dir,
623*4882a593Smuzhiyun 			   hwep->type == USB_ENDPOINT_XFER_CONTROL);
624*4882a593Smuzhiyun done:
625*4882a593Smuzhiyun 	return ret;
626*4882a593Smuzhiyun }
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun /**
629*4882a593Smuzhiyun  * free_pending_td: remove a pending request for the endpoint
630*4882a593Smuzhiyun  * @hwep: endpoint
631*4882a593Smuzhiyun  */
632*4882a593Smuzhiyun static void free_pending_td(struct ci_hw_ep *hwep)
633*4882a593Smuzhiyun {
634*4882a593Smuzhiyun 	struct td_node *pending = hwep->pending_td;
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun 	dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
637*4882a593Smuzhiyun 	hwep->pending_td = NULL;
638*4882a593Smuzhiyun 	kfree(pending);
639*4882a593Smuzhiyun }
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
642*4882a593Smuzhiyun 					   struct td_node *node)
643*4882a593Smuzhiyun {
644*4882a593Smuzhiyun 	hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
645*4882a593Smuzhiyun 	hwep->qh.ptr->td.token &=
646*4882a593Smuzhiyun 		cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 	return hw_ep_prime(ci, hwep->num, hwep->dir,
649*4882a593Smuzhiyun 				hwep->type == USB_ENDPOINT_XFER_CONTROL);
650*4882a593Smuzhiyun }
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun /**
653*4882a593Smuzhiyun  * _hardware_dequeue: handles a request at hardware level
654*4882a593Smuzhiyun  * @hwep: endpoint
655*4882a593Smuzhiyun  * @hwreq:  request
656*4882a593Smuzhiyun  *
657*4882a593Smuzhiyun  * This function returns an error code
658*4882a593Smuzhiyun  */
659*4882a593Smuzhiyun static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
660*4882a593Smuzhiyun {
661*4882a593Smuzhiyun 	u32 tmptoken;
662*4882a593Smuzhiyun 	struct td_node *node, *tmpnode;
663*4882a593Smuzhiyun 	unsigned remaining_length;
664*4882a593Smuzhiyun 	unsigned actual = hwreq->req.length;
665*4882a593Smuzhiyun 	struct ci_hdrc *ci = hwep->ci;
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 	if (hwreq->req.status != -EALREADY)
668*4882a593Smuzhiyun 		return -EINVAL;
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun 	hwreq->req.status = 0;
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
673*4882a593Smuzhiyun 		tmptoken = le32_to_cpu(node->ptr->token);
674*4882a593Smuzhiyun 		if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
675*4882a593Smuzhiyun 			int n = hw_ep_bit(hwep->num, hwep->dir);
676*4882a593Smuzhiyun 
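			/*
			 * Revision 2.4 cores can end up with an active dTD
			 * but a cleared ENDPTSTAT bit; reprime the endpoint
			 * in that case so the transfer keeps making progress.
			 */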
677*4882a593Smuzhiyun 			if (ci->rev == CI_REVISION_24)
678*4882a593Smuzhiyun 				if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
679*4882a593Smuzhiyun 					reprime_dtd(ci, hwep, node);
680*4882a593Smuzhiyun 			hwreq->req.status = -EALREADY;
681*4882a593Smuzhiyun 			return -EBUSY;
682*4882a593Smuzhiyun 		}
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 		remaining_length = (tmptoken & TD_TOTAL_BYTES);
685*4882a593Smuzhiyun 		remaining_length >>= __ffs(TD_TOTAL_BYTES);
686*4882a593Smuzhiyun 		actual -= remaining_length;
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun 		hwreq->req.status = tmptoken & TD_STATUS;
689*4882a593Smuzhiyun 		if ((TD_STATUS_HALTED & hwreq->req.status)) {
690*4882a593Smuzhiyun 			hwreq->req.status = -EPIPE;
691*4882a593Smuzhiyun 			break;
692*4882a593Smuzhiyun 		} else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
693*4882a593Smuzhiyun 			hwreq->req.status = -EPROTO;
694*4882a593Smuzhiyun 			break;
695*4882a593Smuzhiyun 		} else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
696*4882a593Smuzhiyun 			hwreq->req.status = -EILSEQ;
697*4882a593Smuzhiyun 			break;
698*4882a593Smuzhiyun 		}
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 		if (remaining_length) {
701*4882a593Smuzhiyun 			if (hwep->dir == TX) {
702*4882a593Smuzhiyun 				hwreq->req.status = -EPROTO;
703*4882a593Smuzhiyun 				break;
704*4882a593Smuzhiyun 			}
705*4882a593Smuzhiyun 		}
706*4882a593Smuzhiyun 		/*
707*4882a593Smuzhiyun 		 * As the hardware could still address the freed td,
708*4882a593Smuzhiyun 		 * which would render the udc unusable, the cleanup of the
709*4882a593Smuzhiyun 		 * td has to be delayed by one.
710*4882a593Smuzhiyun 		 */
711*4882a593Smuzhiyun 		if (hwep->pending_td)
712*4882a593Smuzhiyun 			free_pending_td(hwep);
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 		hwep->pending_td = node;
715*4882a593Smuzhiyun 		list_del_init(&node->td);
716*4882a593Smuzhiyun 	}
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 	usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
719*4882a593Smuzhiyun 					&hwreq->req, hwep->dir);
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 	hwreq->req.actual += actual;
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun 	if (hwreq->req.status)
724*4882a593Smuzhiyun 		return hwreq->req.status;
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 	return hwreq->req.actual;
727*4882a593Smuzhiyun }
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun /**
730*4882a593Smuzhiyun  * _ep_nuke: dequeues all endpoint requests
731*4882a593Smuzhiyun  * @hwep: endpoint
732*4882a593Smuzhiyun  *
733*4882a593Smuzhiyun  * This function returns an error code
734*4882a593Smuzhiyun  * Caller must hold lock
735*4882a593Smuzhiyun  */
736*4882a593Smuzhiyun static int _ep_nuke(struct ci_hw_ep *hwep)
737*4882a593Smuzhiyun __releases(hwep->lock)
738*4882a593Smuzhiyun __acquires(hwep->lock)
739*4882a593Smuzhiyun {
740*4882a593Smuzhiyun 	struct td_node *node, *tmpnode;
741*4882a593Smuzhiyun 	if (hwep == NULL)
742*4882a593Smuzhiyun 		return -EINVAL;
743*4882a593Smuzhiyun 
744*4882a593Smuzhiyun 	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 	while (!list_empty(&hwep->qh.queue)) {
747*4882a593Smuzhiyun 
748*4882a593Smuzhiyun 		/* pop oldest request */
749*4882a593Smuzhiyun 		struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
750*4882a593Smuzhiyun 						     struct ci_hw_req, queue);
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 		list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
753*4882a593Smuzhiyun 			dma_pool_free(hwep->td_pool, node->ptr, node->dma);
754*4882a593Smuzhiyun 			list_del_init(&node->td);
755*4882a593Smuzhiyun 			node->ptr = NULL;
756*4882a593Smuzhiyun 			kfree(node);
757*4882a593Smuzhiyun 		}
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 		list_del_init(&hwreq->queue);
760*4882a593Smuzhiyun 		hwreq->req.status = -ESHUTDOWN;
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 		if (hwreq->req.complete != NULL) {
763*4882a593Smuzhiyun 			spin_unlock(hwep->lock);
764*4882a593Smuzhiyun 			usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
765*4882a593Smuzhiyun 			spin_lock(hwep->lock);
766*4882a593Smuzhiyun 		}
767*4882a593Smuzhiyun 	}
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun 	if (hwep->pending_td)
770*4882a593Smuzhiyun 		free_pending_td(hwep);
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun 	return 0;
773*4882a593Smuzhiyun }
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
776*4882a593Smuzhiyun {
777*4882a593Smuzhiyun 	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
778*4882a593Smuzhiyun 	int direction, retval = 0;
779*4882a593Smuzhiyun 	unsigned long flags;
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun 	if (ep == NULL || hwep->ep.desc == NULL)
782*4882a593Smuzhiyun 		return -EINVAL;
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun 	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
785*4882a593Smuzhiyun 		return -EOPNOTSUPP;
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 	spin_lock_irqsave(hwep->lock, flags);
788*4882a593Smuzhiyun 
789*4882a593Smuzhiyun 	if (value && hwep->dir == TX && check_transfer &&
790*4882a593Smuzhiyun 		!list_empty(&hwep->qh.queue) &&
791*4882a593Smuzhiyun 			!usb_endpoint_xfer_control(hwep->ep.desc)) {
792*4882a593Smuzhiyun 		spin_unlock_irqrestore(hwep->lock, flags);
793*4882a593Smuzhiyun 		return -EAGAIN;
794*4882a593Smuzhiyun 	}
795*4882a593Smuzhiyun 
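	/*
	 * Control endpoints stall both directions, so loop over TX and RX
	 * until we are back at the original direction; other endpoint types
	 * run the loop exactly once.
	 */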
796*4882a593Smuzhiyun 	direction = hwep->dir;
797*4882a593Smuzhiyun 	do {
798*4882a593Smuzhiyun 		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 		if (!value)
801*4882a593Smuzhiyun 			hwep->wedge = 0;
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun 		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
804*4882a593Smuzhiyun 			hwep->dir = (hwep->dir == TX) ? RX : TX;
805*4882a593Smuzhiyun 
806*4882a593Smuzhiyun 	} while (hwep->dir != direction);
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	spin_unlock_irqrestore(hwep->lock, flags);
809*4882a593Smuzhiyun 	return retval;
810*4882a593Smuzhiyun }
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun /**
814*4882a593Smuzhiyun  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
815*4882a593Smuzhiyun  * @gadget: gadget
816*4882a593Smuzhiyun  *
817*4882a593Smuzhiyun  * This function returns an error code
818*4882a593Smuzhiyun  */
819*4882a593Smuzhiyun static int _gadget_stop_activity(struct usb_gadget *gadget)
820*4882a593Smuzhiyun {
821*4882a593Smuzhiyun 	struct usb_ep *ep;
822*4882a593Smuzhiyun 	struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
823*4882a593Smuzhiyun 	unsigned long flags;
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun 	/* flush all endpoints */
826*4882a593Smuzhiyun 	gadget_for_each_ep(ep, gadget) {
827*4882a593Smuzhiyun 		usb_ep_fifo_flush(ep);
828*4882a593Smuzhiyun 	}
829*4882a593Smuzhiyun 	usb_ep_fifo_flush(&ci->ep0out->ep);
830*4882a593Smuzhiyun 	usb_ep_fifo_flush(&ci->ep0in->ep);
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	/* make sure to disable all endpoints */
833*4882a593Smuzhiyun 	gadget_for_each_ep(ep, gadget) {
834*4882a593Smuzhiyun 		usb_ep_disable(ep);
835*4882a593Smuzhiyun 	}
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 	if (ci->status != NULL) {
838*4882a593Smuzhiyun 		usb_ep_free_request(&ci->ep0in->ep, ci->status);
839*4882a593Smuzhiyun 		ci->status = NULL;
840*4882a593Smuzhiyun 	}
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun 	spin_lock_irqsave(&ci->lock, flags);
843*4882a593Smuzhiyun 	ci->gadget.speed = USB_SPEED_UNKNOWN;
844*4882a593Smuzhiyun 	ci->remote_wakeup = 0;
845*4882a593Smuzhiyun 	ci->suspended = 0;
846*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ci->lock, flags);
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun 	return 0;
849*4882a593Smuzhiyun }
850*4882a593Smuzhiyun 
851*4882a593Smuzhiyun /******************************************************************************
852*4882a593Smuzhiyun  * ISR block
853*4882a593Smuzhiyun  *****************************************************************************/
854*4882a593Smuzhiyun /**
855*4882a593Smuzhiyun  * isr_reset_handler: USB reset interrupt handler
856*4882a593Smuzhiyun  * @ci: UDC device
857*4882a593Smuzhiyun  *
858*4882a593Smuzhiyun  * This function resets the USB engine after a bus reset has occurred
859*4882a593Smuzhiyun  */
860*4882a593Smuzhiyun static void isr_reset_handler(struct ci_hdrc *ci)
861*4882a593Smuzhiyun __releases(ci->lock)
862*4882a593Smuzhiyun __acquires(ci->lock)
863*4882a593Smuzhiyun {
864*4882a593Smuzhiyun 	int retval;
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun 	spin_unlock(&ci->lock);
867*4882a593Smuzhiyun 	if (ci->gadget.speed != USB_SPEED_UNKNOWN)
868*4882a593Smuzhiyun 		usb_gadget_udc_reset(&ci->gadget, ci->driver);
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun 	retval = _gadget_stop_activity(&ci->gadget);
871*4882a593Smuzhiyun 	if (retval)
872*4882a593Smuzhiyun 		goto done;
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 	retval = hw_usb_reset(ci);
875*4882a593Smuzhiyun 	if (retval)
876*4882a593Smuzhiyun 		goto done;
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun 	ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
879*4882a593Smuzhiyun 	if (ci->status == NULL)
880*4882a593Smuzhiyun 		retval = -ENOMEM;
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun done:
883*4882a593Smuzhiyun 	spin_lock(&ci->lock);
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	if (retval)
886*4882a593Smuzhiyun 		dev_err(ci->dev, "error: %i\n", retval);
887*4882a593Smuzhiyun }
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun /**
890*4882a593Smuzhiyun  * isr_get_status_complete: get_status request complete function
891*4882a593Smuzhiyun  * @ep:  endpoint
892*4882a593Smuzhiyun  * @req: request handled
893*4882a593Smuzhiyun  *
894*4882a593Smuzhiyun  * Caller must release lock
895*4882a593Smuzhiyun  */
896*4882a593Smuzhiyun static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
897*4882a593Smuzhiyun {
898*4882a593Smuzhiyun 	if (ep == NULL || req == NULL)
899*4882a593Smuzhiyun 		return;
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 	kfree(req->buf);
902*4882a593Smuzhiyun 	usb_ep_free_request(ep, req);
903*4882a593Smuzhiyun }
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun /**
906*4882a593Smuzhiyun  * _ep_queue: queues (submits) an I/O request to an endpoint
907*4882a593Smuzhiyun  * @ep:        endpoint
908*4882a593Smuzhiyun  * @req:       request
909*4882a593Smuzhiyun  * @gfp_flags: GFP flags (not used)
910*4882a593Smuzhiyun  *
911*4882a593Smuzhiyun  * Caller must hold lock
912*4882a593Smuzhiyun  * This function returns an error code
913*4882a593Smuzhiyun  */
914*4882a593Smuzhiyun static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
915*4882a593Smuzhiyun 		    gfp_t __maybe_unused gfp_flags)
916*4882a593Smuzhiyun {
917*4882a593Smuzhiyun 	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
918*4882a593Smuzhiyun 	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
919*4882a593Smuzhiyun 	struct ci_hdrc *ci = hwep->ci;
920*4882a593Smuzhiyun 	int retval = 0;
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
923*4882a593Smuzhiyun 		return -EINVAL;
924*4882a593Smuzhiyun 
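	/*
	 * Control requests with a data stage are steered to whichever ep0
	 * half matches the current data direction; anything still queued on
	 * it is stale and gets nuked first.
	 */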
925*4882a593Smuzhiyun 	if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
926*4882a593Smuzhiyun 		if (req->length)
927*4882a593Smuzhiyun 			hwep = (ci->ep0_dir == RX) ?
928*4882a593Smuzhiyun 			       ci->ep0out : ci->ep0in;
929*4882a593Smuzhiyun 		if (!list_empty(&hwep->qh.queue)) {
930*4882a593Smuzhiyun 			_ep_nuke(hwep);
931*4882a593Smuzhiyun 			dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
932*4882a593Smuzhiyun 				 _usb_addr(hwep));
933*4882a593Smuzhiyun 		}
934*4882a593Smuzhiyun 	}
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
937*4882a593Smuzhiyun 	    hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
938*4882a593Smuzhiyun 		dev_err(hwep->ci->dev, "request length too big for isochronous\n");
939*4882a593Smuzhiyun 		return -EMSGSIZE;
940*4882a593Smuzhiyun 	}
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 	/* first nuke then test link, e.g. the previous status has not been sent */
943*4882a593Smuzhiyun 	if (!list_empty(&hwreq->queue)) {
944*4882a593Smuzhiyun 		dev_err(hwep->ci->dev, "request already in queue\n");
945*4882a593Smuzhiyun 		return -EBUSY;
946*4882a593Smuzhiyun 	}
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	/* push request */
949*4882a593Smuzhiyun 	hwreq->req.status = -EINPROGRESS;
950*4882a593Smuzhiyun 	hwreq->req.actual = 0;
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	retval = _hardware_enqueue(hwep, hwreq);
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	if (retval == -EALREADY)
955*4882a593Smuzhiyun 		retval = 0;
956*4882a593Smuzhiyun 	if (!retval)
957*4882a593Smuzhiyun 		list_add_tail(&hwreq->queue, &hwep->qh.queue);
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	return retval;
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun /**
963*4882a593Smuzhiyun  * isr_get_status_response: get_status request response
964*4882a593Smuzhiyun  * @ci: ci struct
965*4882a593Smuzhiyun  * @setup: setup request packet
966*4882a593Smuzhiyun  *
967*4882a593Smuzhiyun  * This function returns an error code
968*4882a593Smuzhiyun  */
969*4882a593Smuzhiyun static int isr_get_status_response(struct ci_hdrc *ci,
970*4882a593Smuzhiyun 				   struct usb_ctrlrequest *setup)
971*4882a593Smuzhiyun __releases(hwep->lock)
972*4882a593Smuzhiyun __acquires(hwep->lock)
973*4882a593Smuzhiyun {
974*4882a593Smuzhiyun 	struct ci_hw_ep *hwep = ci->ep0in;
975*4882a593Smuzhiyun 	struct usb_request *req = NULL;
976*4882a593Smuzhiyun 	gfp_t gfp_flags = GFP_ATOMIC;
977*4882a593Smuzhiyun 	int dir, num, retval;
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 	if (hwep == NULL || setup == NULL)
980*4882a593Smuzhiyun 		return -EINVAL;
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun 	spin_unlock(hwep->lock);
983*4882a593Smuzhiyun 	req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
984*4882a593Smuzhiyun 	spin_lock(hwep->lock);
985*4882a593Smuzhiyun 	if (req == NULL)
986*4882a593Smuzhiyun 		return -ENOMEM;
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 	req->complete = isr_get_status_complete;
989*4882a593Smuzhiyun 	req->length   = 2;
990*4882a593Smuzhiyun 	req->buf      = kzalloc(req->length, gfp_flags);
991*4882a593Smuzhiyun 	if (req->buf == NULL) {
992*4882a593Smuzhiyun 		retval = -ENOMEM;
993*4882a593Smuzhiyun 		goto err_free_req;
994*4882a593Smuzhiyun 	}
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
997*4882a593Smuzhiyun 		*(u16 *)req->buf = (ci->remote_wakeup << 1) |
998*4882a593Smuzhiyun 			ci->gadget.is_selfpowered;
999*4882a593Smuzhiyun 	} else if ((setup->bRequestType & USB_RECIP_MASK) \
1000*4882a593Smuzhiyun 		   == USB_RECIP_ENDPOINT) {
1001*4882a593Smuzhiyun 		dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
1002*4882a593Smuzhiyun 			TX : RX;
1003*4882a593Smuzhiyun 		num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
1004*4882a593Smuzhiyun 		*(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
1005*4882a593Smuzhiyun 	}
1006*4882a593Smuzhiyun 	/* else do nothing; reserved for future use */
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun 	retval = _ep_queue(&hwep->ep, req, gfp_flags);
1009*4882a593Smuzhiyun 	if (retval)
1010*4882a593Smuzhiyun 		goto err_free_buf;
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	return 0;
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun  err_free_buf:
1015*4882a593Smuzhiyun 	kfree(req->buf);
1016*4882a593Smuzhiyun  err_free_req:
1017*4882a593Smuzhiyun 	spin_unlock(hwep->lock);
1018*4882a593Smuzhiyun 	usb_ep_free_request(&hwep->ep, req);
1019*4882a593Smuzhiyun 	spin_lock(hwep->lock);
1020*4882a593Smuzhiyun 	return retval;
1021*4882a593Smuzhiyun }
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun /**
1024*4882a593Smuzhiyun  * isr_setup_status_complete: setup_status request complete function
1025*4882a593Smuzhiyun  * @ep:  endpoint
1026*4882a593Smuzhiyun  * @req: request handled
1027*4882a593Smuzhiyun  *
1028*4882a593Smuzhiyun  * Caller must release lock. Put the port in test mode if test mode
1029*4882a593Smuzhiyun  * feature is selected.
1030*4882a593Smuzhiyun  */
1031*4882a593Smuzhiyun static void
1032*4882a593Smuzhiyun isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
1033*4882a593Smuzhiyun {
1034*4882a593Smuzhiyun 	struct ci_hdrc *ci = req->context;
1035*4882a593Smuzhiyun 	unsigned long flags;
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	if (req->status < 0)
1038*4882a593Smuzhiyun 		return;
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	if (ci->setaddr) {
1041*4882a593Smuzhiyun 		hw_usb_set_address(ci, ci->address);
1042*4882a593Smuzhiyun 		ci->setaddr = false;
1043*4882a593Smuzhiyun 		if (ci->address)
1044*4882a593Smuzhiyun 			usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
1045*4882a593Smuzhiyun 	}
1046*4882a593Smuzhiyun 
1047*4882a593Smuzhiyun 	spin_lock_irqsave(&ci->lock, flags);
1048*4882a593Smuzhiyun 	if (ci->test_mode)
1049*4882a593Smuzhiyun 		hw_port_test_set(ci, ci->test_mode);
1050*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ci->lock, flags);
1051*4882a593Smuzhiyun }
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun /**
1054*4882a593Smuzhiyun  * isr_setup_status_phase: queues the status phase of a setup transaction
1055*4882a593Smuzhiyun  * @ci: ci struct
1056*4882a593Smuzhiyun  *
1057*4882a593Smuzhiyun  * This function returns an error code
1058*4882a593Smuzhiyun  */
1059*4882a593Smuzhiyun static int isr_setup_status_phase(struct ci_hdrc *ci)
1060*4882a593Smuzhiyun {
1061*4882a593Smuzhiyun 	struct ci_hw_ep *hwep;
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	/*
1064*4882a593Smuzhiyun 	 * Unexpected USB controller behavior, caused by bad signal integrity
1065*4882a593Smuzhiyun 	 * or ground reference problems, can lead to isr_setup_status_phase
1066*4882a593Smuzhiyun 	 * being called with ci->status equal to NULL.
1067*4882a593Smuzhiyun 	 * If this situation occurs, you should review your USB hardware design.
1068*4882a593Smuzhiyun 	 */
1069*4882a593Smuzhiyun 	if (WARN_ON_ONCE(!ci->status))
1070*4882a593Smuzhiyun 		return -EPIPE;
1071*4882a593Smuzhiyun 
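	/* The status stage runs opposite to the data stage direction. */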
1072*4882a593Smuzhiyun 	hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
1073*4882a593Smuzhiyun 	ci->status->context = ci;
1074*4882a593Smuzhiyun 	ci->status->complete = isr_setup_status_complete;
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun 	return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
1077*4882a593Smuzhiyun }
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun /**
1080*4882a593Smuzhiyun  * isr_tr_complete_low: transaction complete low level handler
1081*4882a593Smuzhiyun  * @hwep: endpoint
1082*4882a593Smuzhiyun  *
1083*4882a593Smuzhiyun  * This function returns an error code
1084*4882a593Smuzhiyun  * Caller must hold lock
1085*4882a593Smuzhiyun  */
1086*4882a593Smuzhiyun static int isr_tr_complete_low(struct ci_hw_ep *hwep)
1087*4882a593Smuzhiyun __releases(hwep->lock)
1088*4882a593Smuzhiyun __acquires(hwep->lock)
1089*4882a593Smuzhiyun {
1090*4882a593Smuzhiyun 	struct ci_hw_req *hwreq, *hwreqtemp;
1091*4882a593Smuzhiyun 	struct ci_hw_ep *hweptemp = hwep;
1092*4882a593Smuzhiyun 	int retval = 0;
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 	list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
1095*4882a593Smuzhiyun 			queue) {
1096*4882a593Smuzhiyun 		retval = _hardware_dequeue(hwep, hwreq);
1097*4882a593Smuzhiyun 		if (retval < 0)
1098*4882a593Smuzhiyun 			break;
1099*4882a593Smuzhiyun 		list_del_init(&hwreq->queue);
1100*4882a593Smuzhiyun 		if (hwreq->req.complete != NULL) {
1101*4882a593Smuzhiyun 			spin_unlock(hwep->lock);
1102*4882a593Smuzhiyun 			if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
1103*4882a593Smuzhiyun 					hwreq->req.length)
1104*4882a593Smuzhiyun 				hweptemp = hwep->ci->ep0in;
1105*4882a593Smuzhiyun 			usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
1106*4882a593Smuzhiyun 			spin_lock(hwep->lock);
1107*4882a593Smuzhiyun 		}
1108*4882a593Smuzhiyun 	}
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 	if (retval == -EBUSY)
1111*4882a593Smuzhiyun 		retval = 0;
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	return retval;
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun 	dev_warn(&ci->gadget.dev,
1119*4882a593Smuzhiyun 		"connect the device to an alternate port if you want HNP\n");
1120*4882a593Smuzhiyun 	return isr_setup_status_phase(ci);
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun /**
1124*4882a593Smuzhiyun  * isr_setup_packet_handler: setup packet handler
1125*4882a593Smuzhiyun  * @ci: UDC descriptor
1126*4882a593Smuzhiyun  *
1127*4882a593Smuzhiyun  * This function handles setup packet
1128*4882a593Smuzhiyun  */
1129*4882a593Smuzhiyun static void isr_setup_packet_handler(struct ci_hdrc *ci)
1130*4882a593Smuzhiyun __releases(ci->lock)
1131*4882a593Smuzhiyun __acquires(ci->lock)
1132*4882a593Smuzhiyun {
1133*4882a593Smuzhiyun 	struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
1134*4882a593Smuzhiyun 	struct usb_ctrlrequest req;
1135*4882a593Smuzhiyun 	int type, num, dir, err = -EINVAL;
1136*4882a593Smuzhiyun 	u8 tmode = 0;
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	/*
1139*4882a593Smuzhiyun 	 * Flush data and handshake transactions of previous
1140*4882a593Smuzhiyun 	 * setup packet.
1141*4882a593Smuzhiyun 	 */
1142*4882a593Smuzhiyun 	_ep_nuke(ci->ep0out);
1143*4882a593Smuzhiyun 	_ep_nuke(ci->ep0in);
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	/* read_setup_packet */
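	/*
	 * The setup tripwire (SUTW) makes the 8-byte copy atomic: if the
	 * hardware clears the guard, a new setup packet overwrote the queue
	 * head while we were copying, so read it again.
	 */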
1146*4882a593Smuzhiyun 	do {
1147*4882a593Smuzhiyun 		hw_test_and_set_setup_guard(ci);
1148*4882a593Smuzhiyun 		memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1149*4882a593Smuzhiyun 	} while (!hw_test_and_clear_setup_guard(ci));
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	type = req.bRequestType;
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 	ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	switch (req.bRequest) {
1156*4882a593Smuzhiyun 	case USB_REQ_CLEAR_FEATURE:
1157*4882a593Smuzhiyun 		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1158*4882a593Smuzhiyun 				le16_to_cpu(req.wValue) ==
1159*4882a593Smuzhiyun 				USB_ENDPOINT_HALT) {
1160*4882a593Smuzhiyun 			if (req.wLength != 0)
1161*4882a593Smuzhiyun 				break;
1162*4882a593Smuzhiyun 			num  = le16_to_cpu(req.wIndex);
1163*4882a593Smuzhiyun 			dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
1164*4882a593Smuzhiyun 			num &= USB_ENDPOINT_NUMBER_MASK;
1165*4882a593Smuzhiyun 			if (dir == TX)
1166*4882a593Smuzhiyun 				num += ci->hw_ep_max / 2;
1167*4882a593Smuzhiyun 			if (!ci->ci_hw_ep[num].wedge) {
1168*4882a593Smuzhiyun 				spin_unlock(&ci->lock);
1169*4882a593Smuzhiyun 				err = usb_ep_clear_halt(
1170*4882a593Smuzhiyun 					&ci->ci_hw_ep[num].ep);
1171*4882a593Smuzhiyun 				spin_lock(&ci->lock);
1172*4882a593Smuzhiyun 				if (err)
1173*4882a593Smuzhiyun 					break;
1174*4882a593Smuzhiyun 			}
1175*4882a593Smuzhiyun 			err = isr_setup_status_phase(ci);
1176*4882a593Smuzhiyun 		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
1177*4882a593Smuzhiyun 				le16_to_cpu(req.wValue) ==
1178*4882a593Smuzhiyun 				USB_DEVICE_REMOTE_WAKEUP) {
1179*4882a593Smuzhiyun 			if (req.wLength != 0)
1180*4882a593Smuzhiyun 				break;
1181*4882a593Smuzhiyun 			ci->remote_wakeup = 0;
1182*4882a593Smuzhiyun 			err = isr_setup_status_phase(ci);
1183*4882a593Smuzhiyun 		} else {
1184*4882a593Smuzhiyun 			goto delegate;
1185*4882a593Smuzhiyun 		}
1186*4882a593Smuzhiyun 		break;
1187*4882a593Smuzhiyun 	case USB_REQ_GET_STATUS:
1188*4882a593Smuzhiyun 		if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
1189*4882a593Smuzhiyun 			le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
1190*4882a593Smuzhiyun 		    type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
1191*4882a593Smuzhiyun 		    type != (USB_DIR_IN|USB_RECIP_INTERFACE))
1192*4882a593Smuzhiyun 			goto delegate;
1193*4882a593Smuzhiyun 		if (le16_to_cpu(req.wLength) != 2 ||
1194*4882a593Smuzhiyun 		    le16_to_cpu(req.wValue)  != 0)
1195*4882a593Smuzhiyun 			break;
1196*4882a593Smuzhiyun 		err = isr_get_status_response(ci, &req);
1197*4882a593Smuzhiyun 		break;
1198*4882a593Smuzhiyun 	case USB_REQ_SET_ADDRESS:
1199*4882a593Smuzhiyun 		if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
1200*4882a593Smuzhiyun 			goto delegate;
1201*4882a593Smuzhiyun 		if (le16_to_cpu(req.wLength) != 0 ||
1202*4882a593Smuzhiyun 		    le16_to_cpu(req.wIndex)  != 0)
1203*4882a593Smuzhiyun 			break;
1204*4882a593Smuzhiyun 		ci->address = (u8)le16_to_cpu(req.wValue);
1205*4882a593Smuzhiyun 		ci->setaddr = true;
1206*4882a593Smuzhiyun 		err = isr_setup_status_phase(ci);
1207*4882a593Smuzhiyun 		break;
1208*4882a593Smuzhiyun 	case USB_REQ_SET_FEATURE:
1209*4882a593Smuzhiyun 		if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1210*4882a593Smuzhiyun 				le16_to_cpu(req.wValue) ==
1211*4882a593Smuzhiyun 				USB_ENDPOINT_HALT) {
1212*4882a593Smuzhiyun 			if (req.wLength != 0)
1213*4882a593Smuzhiyun 				break;
1214*4882a593Smuzhiyun 			num  = le16_to_cpu(req.wIndex);
1215*4882a593Smuzhiyun 			dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
1216*4882a593Smuzhiyun 			num &= USB_ENDPOINT_NUMBER_MASK;
1217*4882a593Smuzhiyun 			if (dir == TX)
1218*4882a593Smuzhiyun 				num += ci->hw_ep_max / 2;
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 			spin_unlock(&ci->lock);
1221*4882a593Smuzhiyun 			err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
1222*4882a593Smuzhiyun 			spin_lock(&ci->lock);
1223*4882a593Smuzhiyun 			if (!err)
1224*4882a593Smuzhiyun 				isr_setup_status_phase(ci);
1225*4882a593Smuzhiyun 		} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
1226*4882a593Smuzhiyun 			if (req.wLength != 0)
1227*4882a593Smuzhiyun 				break;
1228*4882a593Smuzhiyun 			switch (le16_to_cpu(req.wValue)) {
1229*4882a593Smuzhiyun 			case USB_DEVICE_REMOTE_WAKEUP:
1230*4882a593Smuzhiyun 				ci->remote_wakeup = 1;
1231*4882a593Smuzhiyun 				err = isr_setup_status_phase(ci);
1232*4882a593Smuzhiyun 				break;
1233*4882a593Smuzhiyun 			case USB_DEVICE_TEST_MODE:
1234*4882a593Smuzhiyun 				tmode = le16_to_cpu(req.wIndex) >> 8;
1235*4882a593Smuzhiyun 				switch (tmode) {
1236*4882a593Smuzhiyun 				case USB_TEST_J:
1237*4882a593Smuzhiyun 				case USB_TEST_K:
1238*4882a593Smuzhiyun 				case USB_TEST_SE0_NAK:
1239*4882a593Smuzhiyun 				case USB_TEST_PACKET:
1240*4882a593Smuzhiyun 				case USB_TEST_FORCE_ENABLE:
1241*4882a593Smuzhiyun 					ci->test_mode = tmode;
1242*4882a593Smuzhiyun 					err = isr_setup_status_phase(
1243*4882a593Smuzhiyun 							ci);
1244*4882a593Smuzhiyun 					break;
1245*4882a593Smuzhiyun 				default:
1246*4882a593Smuzhiyun 					break;
1247*4882a593Smuzhiyun 				}
1248*4882a593Smuzhiyun 				break;
1249*4882a593Smuzhiyun 			case USB_DEVICE_B_HNP_ENABLE:
1250*4882a593Smuzhiyun 				if (ci_otg_is_fsm_mode(ci)) {
1251*4882a593Smuzhiyun 					ci->gadget.b_hnp_enable = 1;
1252*4882a593Smuzhiyun 					err = isr_setup_status_phase(
1253*4882a593Smuzhiyun 							ci);
1254*4882a593Smuzhiyun 				}
1255*4882a593Smuzhiyun 				break;
1256*4882a593Smuzhiyun 			case USB_DEVICE_A_ALT_HNP_SUPPORT:
1257*4882a593Smuzhiyun 				if (ci_otg_is_fsm_mode(ci))
1258*4882a593Smuzhiyun 					err = otg_a_alt_hnp_support(ci);
1259*4882a593Smuzhiyun 				break;
1260*4882a593Smuzhiyun 			case USB_DEVICE_A_HNP_SUPPORT:
1261*4882a593Smuzhiyun 				if (ci_otg_is_fsm_mode(ci)) {
1262*4882a593Smuzhiyun 					ci->gadget.a_hnp_support = 1;
1263*4882a593Smuzhiyun 					err = isr_setup_status_phase(
1264*4882a593Smuzhiyun 							ci);
1265*4882a593Smuzhiyun 				}
1266*4882a593Smuzhiyun 				break;
1267*4882a593Smuzhiyun 			default:
1268*4882a593Smuzhiyun 				goto delegate;
1269*4882a593Smuzhiyun 			}
1270*4882a593Smuzhiyun 		} else {
1271*4882a593Smuzhiyun 			goto delegate;
1272*4882a593Smuzhiyun 		}
1273*4882a593Smuzhiyun 		break;
1274*4882a593Smuzhiyun 	default:
1275*4882a593Smuzhiyun delegate:
1276*4882a593Smuzhiyun 		if (req.wLength == 0)   /* no data phase */
1277*4882a593Smuzhiyun 			ci->ep0_dir = TX;
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 		spin_unlock(&ci->lock);
1280*4882a593Smuzhiyun 		err = ci->driver->setup(&ci->gadget, &req);
1281*4882a593Smuzhiyun 		spin_lock(&ci->lock);
1282*4882a593Smuzhiyun 		break;
1283*4882a593Smuzhiyun 	}
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	if (err < 0) {
1286*4882a593Smuzhiyun 		spin_unlock(&ci->lock);
1287*4882a593Smuzhiyun 		if (_ep_set_halt(&hwep->ep, 1, false))
1288*4882a593Smuzhiyun 			dev_err(ci->dev, "error: _ep_set_halt\n");
1289*4882a593Smuzhiyun 		spin_lock(&ci->lock);
1290*4882a593Smuzhiyun 	}
1291*4882a593Smuzhiyun }
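/*
 * Illustrative sketch (not part of this driver): requests that reach the
 * "delegate" label above are handed to the function driver's ->setup()
 * callback with ci->lock temporarily dropped. The names my_setup(),
 * MY_VENDOR_REQ and my_handle_vendor() below are hypothetical.
 *
 *	static int my_setup(struct usb_gadget *gadget,
 *			    const struct usb_ctrlrequest *ctrl)
 *	{
 *		if (ctrl->bRequest == MY_VENDOR_REQ)
 *			return my_handle_vendor(gadget, ctrl);
 *		return -EOPNOTSUPP;
 *	}
 *
 * Returning a negative value, as in the last line above, causes the error
 * path of isr_setup_packet_handler() to stall ep0.
 */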
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun /**
1294*4882a593Smuzhiyun  * isr_tr_complete_handler: transaction complete interrupt handler
1295*4882a593Smuzhiyun  * @ci: UDC descriptor
1296*4882a593Smuzhiyun  *
1297*4882a593Smuzhiyun  * This function handles traffic events
1298*4882a593Smuzhiyun  */
1299*4882a593Smuzhiyun static void isr_tr_complete_handler(struct ci_hdrc *ci)
1300*4882a593Smuzhiyun __releases(ci->lock)
1301*4882a593Smuzhiyun __acquires(ci->lock)
1302*4882a593Smuzhiyun {
1303*4882a593Smuzhiyun 	unsigned i;
1304*4882a593Smuzhiyun 	int err;
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	for (i = 0; i < ci->hw_ep_max; i++) {
1307*4882a593Smuzhiyun 		struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 		if (hwep->ep.desc == NULL)
1310*4882a593Smuzhiyun 			continue;   /* not configured */
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 		if (hw_test_and_clear_complete(ci, i)) {
1313*4882a593Smuzhiyun 			err = isr_tr_complete_low(hwep);
1314*4882a593Smuzhiyun 			if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1315*4882a593Smuzhiyun 				if (err > 0)   /* needs status phase */
1316*4882a593Smuzhiyun 					err = isr_setup_status_phase(ci);
1317*4882a593Smuzhiyun 				if (err < 0) {
1318*4882a593Smuzhiyun 					spin_unlock(&ci->lock);
1319*4882a593Smuzhiyun 					if (_ep_set_halt(&hwep->ep, 1, false))
1320*4882a593Smuzhiyun 						dev_err(ci->dev,
1321*4882a593Smuzhiyun 						"error: _ep_set_halt\n");
1322*4882a593Smuzhiyun 					spin_lock(&ci->lock);
1323*4882a593Smuzhiyun 				}
1324*4882a593Smuzhiyun 			}
1325*4882a593Smuzhiyun 		}
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 		/* Only handle setup packet below */
1328*4882a593Smuzhiyun 		if (i == 0 &&
1329*4882a593Smuzhiyun 			hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
1330*4882a593Smuzhiyun 			isr_setup_packet_handler(ci);
1331*4882a593Smuzhiyun 	}
1332*4882a593Smuzhiyun }
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun /******************************************************************************
1335*4882a593Smuzhiyun  * ENDPT block
1336*4882a593Smuzhiyun  *****************************************************************************/
1337*4882a593Smuzhiyun /*
1338*4882a593Smuzhiyun  * ep_enable: configure endpoint, making it usable
1339*4882a593Smuzhiyun  *
1340*4882a593Smuzhiyun  * Check usb_ep_enable() at "usb_gadget.h" for details
1341*4882a593Smuzhiyun  */
1342*4882a593Smuzhiyun static int ep_enable(struct usb_ep *ep,
1343*4882a593Smuzhiyun 		     const struct usb_endpoint_descriptor *desc)
1344*4882a593Smuzhiyun {
1345*4882a593Smuzhiyun 	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1346*4882a593Smuzhiyun 	int retval = 0;
1347*4882a593Smuzhiyun 	unsigned long flags;
1348*4882a593Smuzhiyun 	u32 cap = 0;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	if (ep == NULL || desc == NULL)
1351*4882a593Smuzhiyun 		return -EINVAL;
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	spin_lock_irqsave(hwep->lock, flags);
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 	/* only internal SW should enable ctrl endpts */
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	if (!list_empty(&hwep->qh.queue)) {
1358*4882a593Smuzhiyun 		dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
1359*4882a593Smuzhiyun 		spin_unlock_irqrestore(hwep->lock, flags);
1360*4882a593Smuzhiyun 		return -EBUSY;
1361*4882a593Smuzhiyun 	}
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	hwep->ep.desc = desc;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
1366*4882a593Smuzhiyun 	hwep->num  = usb_endpoint_num(desc);
1367*4882a593Smuzhiyun 	hwep->type = usb_endpoint_type(desc);
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	hwep->ep.maxpacket = usb_endpoint_maxp(desc);
1370*4882a593Smuzhiyun 	hwep->ep.mult = usb_endpoint_maxp_mult(desc);
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1373*4882a593Smuzhiyun 		cap |= QH_IOS;
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 	cap |= QH_ZLT;
1376*4882a593Smuzhiyun 	cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1377*4882a593Smuzhiyun 	/*
1378*4882a593Smuzhiyun 	 * For ISO-TX, we set mult at QH as the largest value, and use
1379*4882a593Smuzhiyun 	 * MultO at TD as real mult value.
1380*4882a593Smuzhiyun 	 */
1381*4882a593Smuzhiyun 	if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
1382*4882a593Smuzhiyun 		cap |= 3 << __ffs(QH_MULT);
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	hwep->qh.ptr->cap = cpu_to_le32(cap);
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 	hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1389*4882a593Smuzhiyun 		dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
1390*4882a593Smuzhiyun 		retval = -EINVAL;
1391*4882a593Smuzhiyun 	}
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	/*
1394*4882a593Smuzhiyun 	 * Enable endpoints in the HW other than ep0 as ep0
1395*4882a593Smuzhiyun 	 * is always enabled
1396*4882a593Smuzhiyun 	 */
1397*4882a593Smuzhiyun 	if (hwep->num)
1398*4882a593Smuzhiyun 		retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
1399*4882a593Smuzhiyun 				       hwep->type);
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	spin_unlock_irqrestore(hwep->lock, flags);
1402*4882a593Smuzhiyun 	return retval;
1403*4882a593Smuzhiyun }
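/*
 * Illustrative sketch (not part of this driver): a function driver normally
 * reaches ep_enable() through usb_ep_enable() after claiming an endpoint
 * with usb_ep_autoconfig() and assigning the speed-adjusted descriptor.
 * The descriptor and variable names below are hypothetical.
 *
 *	ep = usb_ep_autoconfig(gadget, &my_bulk_in_desc);
 *	if (!ep)
 *		return -ENODEV;
 *	ep->desc = &my_bulk_in_desc;
 *	ret = usb_ep_enable(ep);
 *	if (ret)
 *		return ret;
 *
 * usb_ep_enable() dispatches to ep_enable() above through usb_ep_ops.
 */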
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun /*
1406*4882a593Smuzhiyun  * ep_disable: endpoint is no longer usable
1407*4882a593Smuzhiyun  *
1408*4882a593Smuzhiyun  * Check usb_ep_disable() at "usb_gadget.h" for details
1409*4882a593Smuzhiyun  */
1410*4882a593Smuzhiyun static int ep_disable(struct usb_ep *ep)
1411*4882a593Smuzhiyun {
1412*4882a593Smuzhiyun 	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1413*4882a593Smuzhiyun 	int direction, retval = 0;
1414*4882a593Smuzhiyun 	unsigned long flags;
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	if (ep == NULL)
1417*4882a593Smuzhiyun 		return -EINVAL;
1418*4882a593Smuzhiyun 	else if (hwep->ep.desc == NULL)
1419*4882a593Smuzhiyun 		return -EBUSY;
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	spin_lock_irqsave(hwep->lock, flags);
1422*4882a593Smuzhiyun 	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1423*4882a593Smuzhiyun 		spin_unlock_irqrestore(hwep->lock, flags);
1424*4882a593Smuzhiyun 		return 0;
1425*4882a593Smuzhiyun 	}
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 	/* only internal SW should disable ctrl endpts */
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	direction = hwep->dir;
1430*4882a593Smuzhiyun 	do {
1431*4882a593Smuzhiyun 		retval |= _ep_nuke(hwep);
1432*4882a593Smuzhiyun 		retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1435*4882a593Smuzhiyun 			hwep->dir = (hwep->dir == TX) ? RX : TX;
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun 	} while (hwep->dir != direction);
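	/*
	 * Descriptive note: for control endpoints the loop above runs twice,
	 * flipping hwep->dir so that both the RX and TX halves of the
	 * bidirectional endpoint are nuked and disabled; for all other
	 * endpoint types it runs exactly once.
	 */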
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	hwep->ep.desc = NULL;
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	spin_unlock_irqrestore(hwep->lock, flags);
1442*4882a593Smuzhiyun 	return retval;
1443*4882a593Smuzhiyun }
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun /*
1446*4882a593Smuzhiyun  * ep_alloc_request: allocate a request object to use with this endpoint
1447*4882a593Smuzhiyun  *
1448*4882a593Smuzhiyun  * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1449*4882a593Smuzhiyun  */
1450*4882a593Smuzhiyun static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1451*4882a593Smuzhiyun {
1452*4882a593Smuzhiyun 	struct ci_hw_req *hwreq = NULL;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	if (ep == NULL)
1455*4882a593Smuzhiyun 		return NULL;
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
1458*4882a593Smuzhiyun 	if (hwreq != NULL) {
1459*4882a593Smuzhiyun 		INIT_LIST_HEAD(&hwreq->queue);
1460*4882a593Smuzhiyun 		INIT_LIST_HEAD(&hwreq->tds);
1461*4882a593Smuzhiyun 	}
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	return (hwreq == NULL) ? NULL : &hwreq->req;
1464*4882a593Smuzhiyun }
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun /*
1467*4882a593Smuzhiyun  * ep_free_request: frees a request object
1468*4882a593Smuzhiyun  *
1469*4882a593Smuzhiyun  * Check usb_ep_free_request() at "usb_gadget.h" for details
1470*4882a593Smuzhiyun  */
1471*4882a593Smuzhiyun static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1472*4882a593Smuzhiyun {
1473*4882a593Smuzhiyun 	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1474*4882a593Smuzhiyun 	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1475*4882a593Smuzhiyun 	struct td_node *node, *tmpnode;
1476*4882a593Smuzhiyun 	unsigned long flags;
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	if (ep == NULL || req == NULL) {
1479*4882a593Smuzhiyun 		return;
1480*4882a593Smuzhiyun 	} else if (!list_empty(&hwreq->queue)) {
1481*4882a593Smuzhiyun 		dev_err(hwep->ci->dev, "freeing queued request\n");
1482*4882a593Smuzhiyun 		return;
1483*4882a593Smuzhiyun 	}
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	spin_lock_irqsave(hwep->lock, flags);
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1488*4882a593Smuzhiyun 		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1489*4882a593Smuzhiyun 		list_del_init(&node->td);
1490*4882a593Smuzhiyun 		node->ptr = NULL;
1491*4882a593Smuzhiyun 		kfree(node);
1492*4882a593Smuzhiyun 	}
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	kfree(hwreq);
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	spin_unlock_irqrestore(hwep->lock, flags);
1497*4882a593Smuzhiyun }
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun /*
1500*4882a593Smuzhiyun  * ep_queue: queues (submits) an I/O request to an endpoint
1501*4882a593Smuzhiyun  *
1502*4882a593Smuzhiyun  * Check usb_ep_queue() at "usb_gadget.h" for details
1503*4882a593Smuzhiyun  */
1504*4882a593Smuzhiyun static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1505*4882a593Smuzhiyun 		    gfp_t __maybe_unused gfp_flags)
1506*4882a593Smuzhiyun {
1507*4882a593Smuzhiyun 	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1508*4882a593Smuzhiyun 	int retval = 0;
1509*4882a593Smuzhiyun 	unsigned long flags;
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 	if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
1512*4882a593Smuzhiyun 		return -EINVAL;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	spin_lock_irqsave(hwep->lock, flags);
1515*4882a593Smuzhiyun 	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1516*4882a593Smuzhiyun 		spin_unlock_irqrestore(hwep->lock, flags);
1517*4882a593Smuzhiyun 		return 0;
1518*4882a593Smuzhiyun 	}
1519*4882a593Smuzhiyun 	retval = _ep_queue(ep, req, gfp_flags);
1520*4882a593Smuzhiyun 	spin_unlock_irqrestore(hwep->lock, flags);
1521*4882a593Smuzhiyun 	return retval;
1522*4882a593Smuzhiyun }
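/*
 * Illustrative sketch (not part of this driver): the usual request life
 * cycle seen from a function driver; my_complete(), buf and len are
 * hypothetical. The completion callback runs from isr_tr_complete_low()
 * via usb_gadget_giveback_request() with the endpoint lock dropped.
 *
 *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		pr_debug("done: status %d, %u bytes\n",
 *			 req->status, req->actual);
 *	}
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	if (req) {
 *		req->buf      = buf;
 *		req->length   = len;
 *		req->complete = my_complete;
 *		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 *	}
 */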
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun /*
1525*4882a593Smuzhiyun  * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1526*4882a593Smuzhiyun  *
1527*4882a593Smuzhiyun  * Check usb_ep_dequeue() at "usb_gadget.h" for details
1528*4882a593Smuzhiyun  */
1529*4882a593Smuzhiyun static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1530*4882a593Smuzhiyun {
1531*4882a593Smuzhiyun 	struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1532*4882a593Smuzhiyun 	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1533*4882a593Smuzhiyun 	unsigned long flags;
1534*4882a593Smuzhiyun 	struct td_node *node, *tmpnode;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
1537*4882a593Smuzhiyun 		hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
1538*4882a593Smuzhiyun 		list_empty(&hwep->qh.queue))
1539*4882a593Smuzhiyun 		return -EINVAL;
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	spin_lock_irqsave(hwep->lock, flags);
1542*4882a593Smuzhiyun 	if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
1543*4882a593Smuzhiyun 		hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun 	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1546*4882a593Smuzhiyun 		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1547*4882a593Smuzhiyun 		list_del(&node->td);
1548*4882a593Smuzhiyun 		kfree(node);
1549*4882a593Smuzhiyun 	}
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 	/* pop request */
1552*4882a593Smuzhiyun 	list_del_init(&hwreq->queue);
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun 	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	req->status = -ECONNRESET;
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 	if (hwreq->req.complete != NULL) {
1559*4882a593Smuzhiyun 		spin_unlock(hwep->lock);
1560*4882a593Smuzhiyun 		usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
1561*4882a593Smuzhiyun 		spin_lock(hwep->lock);
1562*4882a593Smuzhiyun 	}
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	spin_unlock_irqrestore(hwep->lock, flags);
1565*4882a593Smuzhiyun 	return 0;
1566*4882a593Smuzhiyun }
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun /*
1569*4882a593Smuzhiyun  * ep_set_halt: sets the endpoint halt feature
1570*4882a593Smuzhiyun  *
1571*4882a593Smuzhiyun  * Check usb_ep_set_halt() at "usb_gadget.h" for details
1572*4882a593Smuzhiyun  */
1573*4882a593Smuzhiyun static int ep_set_halt(struct usb_ep *ep, int value)
1574*4882a593Smuzhiyun {
1575*4882a593Smuzhiyun 	return _ep_set_halt(ep, value, true);
1576*4882a593Smuzhiyun }
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun /*
1579*4882a593Smuzhiyun  * ep_set_wedge: sets the halt feature and ignores clear requests
1580*4882a593Smuzhiyun  *
1581*4882a593Smuzhiyun  * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1582*4882a593Smuzhiyun  */
1583*4882a593Smuzhiyun static int ep_set_wedge(struct usb_ep *ep)
1584*4882a593Smuzhiyun {
1585*4882a593Smuzhiyun 	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1586*4882a593Smuzhiyun 	unsigned long flags;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	if (ep == NULL || hwep->ep.desc == NULL)
1589*4882a593Smuzhiyun 		return -EINVAL;
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	spin_lock_irqsave(hwep->lock, flags);
1592*4882a593Smuzhiyun 	hwep->wedge = 1;
1593*4882a593Smuzhiyun 	spin_unlock_irqrestore(hwep->lock, flags);
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	return usb_ep_set_halt(ep);
1596*4882a593Smuzhiyun }
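/*
 * Illustrative note (not part of this driver): the difference between
 * halting and wedging an endpoint, as seen from a function driver.
 *
 *	usb_ep_set_halt(ep);
 *	usb_ep_set_wedge(ep);
 *
 * A plain halt can be cleared by the host with CLEAR_FEATURE(ENDPOINT_HALT);
 * a wedged endpoint ignores that request, see the wedge check in the
 * USB_REQ_CLEAR_FEATURE handling of isr_setup_packet_handler() above, and
 * stays halted until the gadget side clears or re-enables the endpoint.
 */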
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun /*
1599*4882a593Smuzhiyun  * ep_fifo_flush: flushes contents of a fifo
1600*4882a593Smuzhiyun  *
1601*4882a593Smuzhiyun  * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1602*4882a593Smuzhiyun  */
1603*4882a593Smuzhiyun static void ep_fifo_flush(struct usb_ep *ep)
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun 	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1606*4882a593Smuzhiyun 	unsigned long flags;
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 	if (ep == NULL) {
1609*4882a593Smuzhiyun 		dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
1610*4882a593Smuzhiyun 		return;
1611*4882a593Smuzhiyun 	}
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	spin_lock_irqsave(hwep->lock, flags);
1614*4882a593Smuzhiyun 	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1615*4882a593Smuzhiyun 		spin_unlock_irqrestore(hwep->lock, flags);
1616*4882a593Smuzhiyun 		return;
1617*4882a593Smuzhiyun 	}
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	spin_unlock_irqrestore(hwep->lock, flags);
1622*4882a593Smuzhiyun }
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun /*
1625*4882a593Smuzhiyun  * Endpoint-specific part of the API to the USB controller hardware
1626*4882a593Smuzhiyun  * Check "usb_gadget.h" for details
1627*4882a593Smuzhiyun  */
1628*4882a593Smuzhiyun static const struct usb_ep_ops usb_ep_ops = {
1629*4882a593Smuzhiyun 	.enable	       = ep_enable,
1630*4882a593Smuzhiyun 	.disable       = ep_disable,
1631*4882a593Smuzhiyun 	.alloc_request = ep_alloc_request,
1632*4882a593Smuzhiyun 	.free_request  = ep_free_request,
1633*4882a593Smuzhiyun 	.queue	       = ep_queue,
1634*4882a593Smuzhiyun 	.dequeue       = ep_dequeue,
1635*4882a593Smuzhiyun 	.set_halt      = ep_set_halt,
1636*4882a593Smuzhiyun 	.set_wedge     = ep_set_wedge,
1637*4882a593Smuzhiyun 	.fifo_flush    = ep_fifo_flush,
1638*4882a593Smuzhiyun };
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun /******************************************************************************
1641*4882a593Smuzhiyun  * GADGET block
1642*4882a593Smuzhiyun  *****************************************************************************/
1643*4882a593Smuzhiyun /*
1644*4882a593Smuzhiyun  * ci_hdrc_gadget_connect: caller makes sure gadget driver is bound
1645*4882a593Smuzhiyun  */
1646*4882a593Smuzhiyun static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
1647*4882a593Smuzhiyun {
1648*4882a593Smuzhiyun 	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	if (is_active) {
1651*4882a593Smuzhiyun 		pm_runtime_get_sync(ci->dev);
1652*4882a593Smuzhiyun 		hw_device_reset(ci);
1653*4882a593Smuzhiyun 		spin_lock_irq(&ci->lock);
1654*4882a593Smuzhiyun 		if (ci->driver) {
1655*4882a593Smuzhiyun 			hw_device_state(ci, ci->ep0out->qh.dma);
1656*4882a593Smuzhiyun 			usb_gadget_set_state(_gadget, USB_STATE_POWERED);
1657*4882a593Smuzhiyun 			spin_unlock_irq(&ci->lock);
1658*4882a593Smuzhiyun 			usb_udc_vbus_handler(_gadget, true);
1659*4882a593Smuzhiyun 		} else {
1660*4882a593Smuzhiyun 			spin_unlock_irq(&ci->lock);
1661*4882a593Smuzhiyun 		}
1662*4882a593Smuzhiyun 	} else {
1663*4882a593Smuzhiyun 		usb_udc_vbus_handler(_gadget, false);
1664*4882a593Smuzhiyun 		if (ci->driver)
1665*4882a593Smuzhiyun 			ci->driver->disconnect(&ci->gadget);
1666*4882a593Smuzhiyun 		hw_device_state(ci, 0);
1667*4882a593Smuzhiyun 		if (ci->platdata->notify_event)
1668*4882a593Smuzhiyun 			ci->platdata->notify_event(ci,
1669*4882a593Smuzhiyun 			CI_HDRC_CONTROLLER_STOPPED_EVENT);
1670*4882a593Smuzhiyun 		_gadget_stop_activity(&ci->gadget);
1671*4882a593Smuzhiyun 		pm_runtime_put_sync(ci->dev);
1672*4882a593Smuzhiyun 		usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
1673*4882a593Smuzhiyun 	}
1674*4882a593Smuzhiyun }
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1677*4882a593Smuzhiyun {
1678*4882a593Smuzhiyun 	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1679*4882a593Smuzhiyun 	unsigned long flags;
1680*4882a593Smuzhiyun 	int ret = 0;
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 	spin_lock_irqsave(&ci->lock, flags);
1683*4882a593Smuzhiyun 	ci->vbus_active = is_active;
1684*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ci->lock, flags);
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun 	if (ci->usb_phy)
1687*4882a593Smuzhiyun 		usb_phy_set_charger_state(ci->usb_phy, is_active ?
1688*4882a593Smuzhiyun 			USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun 	if (ci->platdata->notify_event)
1691*4882a593Smuzhiyun 		ret = ci->platdata->notify_event(ci,
1692*4882a593Smuzhiyun 				CI_HDRC_CONTROLLER_VBUS_EVENT);
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun 	if (ci->driver)
1695*4882a593Smuzhiyun 		ci_hdrc_gadget_connect(_gadget, is_active);
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	return ret;
1698*4882a593Smuzhiyun }
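/*
 * Illustrative note (not part of this driver): ->vbus_session is reached
 * through the gadget core helpers, typically from the chipidea core's
 * VBUS (BSV) change handling:
 *
 *	usb_gadget_vbus_connect(&ci->gadget);
 *	usb_gadget_vbus_disconnect(&ci->gadget);
 *
 * which end up in ci_udc_vbus_session(_gadget, 1) and
 * ci_udc_vbus_session(_gadget, 0) respectively.
 */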
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun static int ci_udc_wakeup(struct usb_gadget *_gadget)
1701*4882a593Smuzhiyun {
1702*4882a593Smuzhiyun 	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1703*4882a593Smuzhiyun 	unsigned long flags;
1704*4882a593Smuzhiyun 	int ret = 0;
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	spin_lock_irqsave(&ci->lock, flags);
1707*4882a593Smuzhiyun 	if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
1708*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ci->lock, flags);
1709*4882a593Smuzhiyun 		return 0;
1710*4882a593Smuzhiyun 	}
1711*4882a593Smuzhiyun 	if (!ci->remote_wakeup) {
1712*4882a593Smuzhiyun 		ret = -EOPNOTSUPP;
1713*4882a593Smuzhiyun 		goto out;
1714*4882a593Smuzhiyun 	}
1715*4882a593Smuzhiyun 	if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
1716*4882a593Smuzhiyun 		ret = -EINVAL;
1717*4882a593Smuzhiyun 		goto out;
1718*4882a593Smuzhiyun 	}
1719*4882a593Smuzhiyun 	hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1720*4882a593Smuzhiyun out:
1721*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ci->lock, flags);
1722*4882a593Smuzhiyun 	return ret;
1723*4882a593Smuzhiyun }
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
1726*4882a593Smuzhiyun {
1727*4882a593Smuzhiyun 	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	if (ci->usb_phy)
1730*4882a593Smuzhiyun 		return usb_phy_set_power(ci->usb_phy, ma);
1731*4882a593Smuzhiyun 	return -ENOTSUPP;
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
1735*4882a593Smuzhiyun {
1736*4882a593Smuzhiyun 	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1737*4882a593Smuzhiyun 	struct ci_hw_ep *hwep = ci->ep0in;
1738*4882a593Smuzhiyun 	unsigned long flags;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	spin_lock_irqsave(hwep->lock, flags);
1741*4882a593Smuzhiyun 	_gadget->is_selfpowered = (is_on != 0);
1742*4882a593Smuzhiyun 	spin_unlock_irqrestore(hwep->lock, flags);
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	return 0;
1745*4882a593Smuzhiyun }
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun /* Change the Data+ pullup status;
1748*4882a593Smuzhiyun  * this function is used by usb_gadget_connect/disconnect.
1749*4882a593Smuzhiyun  */
1750*4882a593Smuzhiyun static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
1751*4882a593Smuzhiyun {
1752*4882a593Smuzhiyun 	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	/*
1755*4882a593Smuzhiyun 	 * The Data+ pullup is controlled by the OTG state machine in OTG fsm
1756*4882a593Smuzhiyun 	 * mode; don't touch Data+ in host mode for a dual-role config.
1757*4882a593Smuzhiyun 	 */
1758*4882a593Smuzhiyun 	if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
1759*4882a593Smuzhiyun 		return 0;
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 	pm_runtime_get_sync(ci->dev);
1762*4882a593Smuzhiyun 	if (is_on)
1763*4882a593Smuzhiyun 		hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
1764*4882a593Smuzhiyun 	else
1765*4882a593Smuzhiyun 		hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
1766*4882a593Smuzhiyun 	pm_runtime_put_sync(ci->dev);
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	return 0;
1769*4882a593Smuzhiyun }
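/*
 * Illustrative note (not part of this driver): the pullup op above is what
 * the gadget core invokes for soft connect/disconnect, e.g.:
 *
 *	usb_gadget_connect(gadget);
 *	usb_gadget_disconnect(gadget);
 *
 * which end up in ci_udc_pullup(gadget, 1) (setting USBCMD.RS) and
 * ci_udc_pullup(gadget, 0) (clearing USBCMD.RS) respectively.
 */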
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun static int ci_udc_start(struct usb_gadget *gadget,
1772*4882a593Smuzhiyun 			 struct usb_gadget_driver *driver);
1773*4882a593Smuzhiyun static int ci_udc_stop(struct usb_gadget *gadget);
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun /* Match ISOC IN from the highest endpoint */
1776*4882a593Smuzhiyun static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
1777*4882a593Smuzhiyun 			      struct usb_endpoint_descriptor *desc,
1778*4882a593Smuzhiyun 			      struct usb_ss_ep_comp_descriptor *comp_desc)
1779*4882a593Smuzhiyun {
1780*4882a593Smuzhiyun 	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1781*4882a593Smuzhiyun 	struct usb_ep *ep;
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) {
1784*4882a593Smuzhiyun 		list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) {
1785*4882a593Smuzhiyun 			if (ep->caps.dir_in && !ep->claimed)
1786*4882a593Smuzhiyun 				return ep;
1787*4882a593Smuzhiyun 		}
1788*4882a593Smuzhiyun 	}
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 	return NULL;
1791*4882a593Smuzhiyun }
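/*
 * Note (assumption about the gadget core's epautoconf behavior): the
 * ->match_ep hook is consulted by usb_ep_autoconfig_ss() before its generic
 * capability matching, so ISOC IN requests are steered here to the
 * highest-numbered unclaimed IN endpoint, while everything else falls back
 * to the default matching.
 */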
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun /*
1794*4882a593Smuzhiyun  * Device operations part of the API to the USB controller hardware,
1795*4882a593Smuzhiyun  * which don't involve endpoints (or i/o)
1796*4882a593Smuzhiyun  * Check  "usb_gadget.h" for details
1797*4882a593Smuzhiyun  */
1798*4882a593Smuzhiyun static const struct usb_gadget_ops usb_gadget_ops = {
1799*4882a593Smuzhiyun 	.vbus_session	= ci_udc_vbus_session,
1800*4882a593Smuzhiyun 	.wakeup		= ci_udc_wakeup,
1801*4882a593Smuzhiyun 	.set_selfpowered	= ci_udc_selfpowered,
1802*4882a593Smuzhiyun 	.pullup		= ci_udc_pullup,
1803*4882a593Smuzhiyun 	.vbus_draw	= ci_udc_vbus_draw,
1804*4882a593Smuzhiyun 	.udc_start	= ci_udc_start,
1805*4882a593Smuzhiyun 	.udc_stop	= ci_udc_stop,
1806*4882a593Smuzhiyun 	.match_ep 	= ci_udc_match_ep,
1807*4882a593Smuzhiyun };
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun static int init_eps(struct ci_hdrc *ci)
1810*4882a593Smuzhiyun {
1811*4882a593Smuzhiyun 	int retval = 0, i, j;
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	for (i = 0; i < ci->hw_ep_max/2; i++)
1814*4882a593Smuzhiyun 		for (j = RX; j <= TX; j++) {
1815*4882a593Smuzhiyun 			int k = i + j * ci->hw_ep_max/2;
1816*4882a593Smuzhiyun 			struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 			scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
1819*4882a593Smuzhiyun 					(j == TX)  ? "in" : "out");
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 			hwep->ci          = ci;
1822*4882a593Smuzhiyun 			hwep->lock         = &ci->lock;
1823*4882a593Smuzhiyun 			hwep->td_pool      = ci->td_pool;
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun 			hwep->ep.name      = hwep->name;
1826*4882a593Smuzhiyun 			hwep->ep.ops       = &usb_ep_ops;
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 			if (i == 0) {
1829*4882a593Smuzhiyun 				hwep->ep.caps.type_control = true;
1830*4882a593Smuzhiyun 			} else {
1831*4882a593Smuzhiyun 				hwep->ep.caps.type_iso = true;
1832*4882a593Smuzhiyun 				hwep->ep.caps.type_bulk = true;
1833*4882a593Smuzhiyun 				hwep->ep.caps.type_int = true;
1834*4882a593Smuzhiyun 			}
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 			if (j == TX)
1837*4882a593Smuzhiyun 				hwep->ep.caps.dir_in = true;
1838*4882a593Smuzhiyun 			else
1839*4882a593Smuzhiyun 				hwep->ep.caps.dir_out = true;
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 			/*
1842*4882a593Smuzhiyun 			 * for ep0: maxP is defined in the descriptor; for
1843*4882a593Smuzhiyun 			 * other eps, maxP is set by usb_ep_autoconfig(),
1844*4882a593Smuzhiyun 			 * called from the gadget layer
1845*4882a593Smuzhiyun 			 */
1846*4882a593Smuzhiyun 			usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun 			INIT_LIST_HEAD(&hwep->qh.queue);
1849*4882a593Smuzhiyun 			hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
1850*4882a593Smuzhiyun 						       &hwep->qh.dma);
1851*4882a593Smuzhiyun 			if (hwep->qh.ptr == NULL)
1852*4882a593Smuzhiyun 				retval = -ENOMEM;
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 			/*
1855*4882a593Smuzhiyun 			 * set up shorthands for ep0 out and in endpoints,
1856*4882a593Smuzhiyun 			 * don't add to gadget's ep_list
1857*4882a593Smuzhiyun 			 */
1858*4882a593Smuzhiyun 			if (i == 0) {
1859*4882a593Smuzhiyun 				if (j == RX)
1860*4882a593Smuzhiyun 					ci->ep0out = hwep;
1861*4882a593Smuzhiyun 				else
1862*4882a593Smuzhiyun 					ci->ep0in = hwep;
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 				usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
1865*4882a593Smuzhiyun 				continue;
1866*4882a593Smuzhiyun 			}
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 			list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
1869*4882a593Smuzhiyun 		}
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 	return retval;
1872*4882a593Smuzhiyun }
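/*
 * Worked example of the index mapping above, assuming hw_ep_max = 32
 * (16 bidirectional hardware endpoints): k = i + j * hw_ep_max / 2 with
 * j = RX (0) or TX (1), so OUT endpoints occupy ci_hw_ep[0..15] and IN
 * endpoints ci_hw_ep[16..31]; e.g. "ep1out" is ci_hw_ep[1] and "ep1in" is
 * ci_hw_ep[17]. ep0out/ep0in are not added to gadget.ep_list and are
 * reached through the ci->ep0out / ci->ep0in shorthands instead.
 */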
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun static void destroy_eps(struct ci_hdrc *ci)
1875*4882a593Smuzhiyun {
1876*4882a593Smuzhiyun 	int i;
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 	for (i = 0; i < ci->hw_ep_max; i++) {
1879*4882a593Smuzhiyun 		struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 		if (hwep->pending_td)
1882*4882a593Smuzhiyun 			free_pending_td(hwep);
1883*4882a593Smuzhiyun 		dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1884*4882a593Smuzhiyun 	}
1885*4882a593Smuzhiyun }
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun /**
1888*4882a593Smuzhiyun  * ci_udc_start: register a gadget driver
1889*4882a593Smuzhiyun  * @gadget: our gadget
1890*4882a593Smuzhiyun  * @driver: the driver being registered
1891*4882a593Smuzhiyun  *
1892*4882a593Smuzhiyun  * Interrupts are enabled here.
1893*4882a593Smuzhiyun  */
1894*4882a593Smuzhiyun static int ci_udc_start(struct usb_gadget *gadget,
1895*4882a593Smuzhiyun 			 struct usb_gadget_driver *driver)
1896*4882a593Smuzhiyun {
1897*4882a593Smuzhiyun 	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1898*4882a593Smuzhiyun 	int retval;
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	if (driver->disconnect == NULL)
1901*4882a593Smuzhiyun 		return -EINVAL;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
1904*4882a593Smuzhiyun 	retval = usb_ep_enable(&ci->ep0out->ep);
1905*4882a593Smuzhiyun 	if (retval)
1906*4882a593Smuzhiyun 		return retval;
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
1909*4882a593Smuzhiyun 	retval = usb_ep_enable(&ci->ep0in->ep);
1910*4882a593Smuzhiyun 	if (retval)
1911*4882a593Smuzhiyun 		return retval;
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 	ci->driver = driver;
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 	/* Start otg fsm for B-device */
1916*4882a593Smuzhiyun 	if (ci_otg_is_fsm_mode(ci) && ci->fsm.id) {
1917*4882a593Smuzhiyun 		ci_hdrc_otg_fsm_start(ci);
1918*4882a593Smuzhiyun 		return retval;
1919*4882a593Smuzhiyun 	}
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	if (ci->vbus_active)
1922*4882a593Smuzhiyun 		ci_hdrc_gadget_connect(gadget, 1);
1923*4882a593Smuzhiyun 	else
1924*4882a593Smuzhiyun 		usb_udc_vbus_handler(&ci->gadget, false);
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	return retval;
1927*4882a593Smuzhiyun }
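/*
 * Illustrative sketch (not part of this driver): ci_udc_start()/ci_udc_stop()
 * are invoked by the UDC core when a gadget driver binds or unbinds, e.g.
 * one registered through usb_gadget_probe_driver() or the composite/configfs
 * layer. All names below are hypothetical.
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.function   = "my-gadget",
 *		.max_speed  = USB_SPEED_HIGH,
 *		.bind       = my_bind,
 *		.unbind     = my_unbind,
 *		.setup      = my_setup,
 *		.disconnect = my_disconnect,
 *		.driver     = { .name = "my-gadget" },
 *	};
 *
 * The .disconnect callback is required; ci_udc_start() returns -EINVAL
 * without it (see the check above).
 */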
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
1930*4882a593Smuzhiyun {
1931*4882a593Smuzhiyun 	if (!ci_otg_is_fsm_mode(ci))
1932*4882a593Smuzhiyun 		return;
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 	mutex_lock(&ci->fsm.lock);
1935*4882a593Smuzhiyun 	if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
1936*4882a593Smuzhiyun 		ci->fsm.a_bidl_adis_tmout = 1;
1937*4882a593Smuzhiyun 		ci_hdrc_otg_fsm_start(ci);
1938*4882a593Smuzhiyun 	} else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
1939*4882a593Smuzhiyun 		ci->fsm.protocol = PROTO_UNDEF;
1940*4882a593Smuzhiyun 		ci->fsm.otg->state = OTG_STATE_UNDEFINED;
1941*4882a593Smuzhiyun 	}
1942*4882a593Smuzhiyun 	mutex_unlock(&ci->fsm.lock);
1943*4882a593Smuzhiyun }
1944*4882a593Smuzhiyun 
1945*4882a593Smuzhiyun /*
1946*4882a593Smuzhiyun  * ci_udc_stop: unregister a gadget driver
1947*4882a593Smuzhiyun  */
1948*4882a593Smuzhiyun static int ci_udc_stop(struct usb_gadget *gadget)
1949*4882a593Smuzhiyun {
1950*4882a593Smuzhiyun 	struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1951*4882a593Smuzhiyun 	unsigned long flags;
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	spin_lock_irqsave(&ci->lock, flags);
1954*4882a593Smuzhiyun 	ci->driver = NULL;
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	if (ci->vbus_active) {
1957*4882a593Smuzhiyun 		hw_device_state(ci, 0);
1958*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ci->lock, flags);
1959*4882a593Smuzhiyun 		if (ci->platdata->notify_event)
1960*4882a593Smuzhiyun 			ci->platdata->notify_event(ci,
1961*4882a593Smuzhiyun 			CI_HDRC_CONTROLLER_STOPPED_EVENT);
1962*4882a593Smuzhiyun 		_gadget_stop_activity(&ci->gadget);
1963*4882a593Smuzhiyun 		spin_lock_irqsave(&ci->lock, flags);
1964*4882a593Smuzhiyun 		pm_runtime_put(ci->dev);
1965*4882a593Smuzhiyun 	}
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ci->lock, flags);
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun 	ci_udc_stop_for_otg_fsm(ci);
1970*4882a593Smuzhiyun 	return 0;
1971*4882a593Smuzhiyun }
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun /******************************************************************************
1974*4882a593Smuzhiyun  * BUS block
1975*4882a593Smuzhiyun  *****************************************************************************/
1976*4882a593Smuzhiyun /*
1977*4882a593Smuzhiyun  * udc_irq: ci interrupt handler
1978*4882a593Smuzhiyun  *
1979*4882a593Smuzhiyun  * This function returns IRQ_HANDLED if the IRQ has been handled
1980*4882a593Smuzhiyun  * It locks access to registers
1981*4882a593Smuzhiyun  */
1982*4882a593Smuzhiyun static irqreturn_t udc_irq(struct ci_hdrc *ci)
1983*4882a593Smuzhiyun {
1984*4882a593Smuzhiyun 	irqreturn_t retval;
1985*4882a593Smuzhiyun 	u32 intr;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 	if (ci == NULL)
1988*4882a593Smuzhiyun 		return IRQ_HANDLED;
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun 	spin_lock(&ci->lock);
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 	if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
1993*4882a593Smuzhiyun 		if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
1994*4882a593Smuzhiyun 				USBMODE_CM_DC) {
1995*4882a593Smuzhiyun 			spin_unlock(&ci->lock);
1996*4882a593Smuzhiyun 			return IRQ_NONE;
1997*4882a593Smuzhiyun 		}
1998*4882a593Smuzhiyun 	}
1999*4882a593Smuzhiyun 	intr = hw_test_and_clear_intr_active(ci);
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	if (intr) {
2002*4882a593Smuzhiyun 		/* order defines priority - do NOT change it */
2003*4882a593Smuzhiyun 		if (USBi_URI & intr)
2004*4882a593Smuzhiyun 			isr_reset_handler(ci);
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun 		if (USBi_PCI & intr) {
2007*4882a593Smuzhiyun 			ci->gadget.speed = hw_port_is_high_speed(ci) ?
2008*4882a593Smuzhiyun 				USB_SPEED_HIGH : USB_SPEED_FULL;
2009*4882a593Smuzhiyun 			if (ci->suspended) {
2010*4882a593Smuzhiyun 				if (ci->driver->resume) {
2011*4882a593Smuzhiyun 					spin_unlock(&ci->lock);
2012*4882a593Smuzhiyun 					ci->driver->resume(&ci->gadget);
2013*4882a593Smuzhiyun 					spin_lock(&ci->lock);
2014*4882a593Smuzhiyun 				}
2015*4882a593Smuzhiyun 				ci->suspended = 0;
2016*4882a593Smuzhiyun 				usb_gadget_set_state(&ci->gadget,
2017*4882a593Smuzhiyun 						ci->resume_state);
2018*4882a593Smuzhiyun 			}
2019*4882a593Smuzhiyun 		}
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 		if (USBi_UI  & intr)
2022*4882a593Smuzhiyun 			isr_tr_complete_handler(ci);
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 		if ((USBi_SLI & intr) && !(ci->suspended)) {
2025*4882a593Smuzhiyun 			ci->suspended = 1;
2026*4882a593Smuzhiyun 			ci->resume_state = ci->gadget.state;
2027*4882a593Smuzhiyun 			if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
2028*4882a593Smuzhiyun 			    ci->driver->suspend) {
2029*4882a593Smuzhiyun 				spin_unlock(&ci->lock);
2030*4882a593Smuzhiyun 				ci->driver->suspend(&ci->gadget);
2031*4882a593Smuzhiyun 				spin_lock(&ci->lock);
2032*4882a593Smuzhiyun 			}
2033*4882a593Smuzhiyun 			usb_gadget_set_state(&ci->gadget,
2034*4882a593Smuzhiyun 					USB_STATE_SUSPENDED);
2035*4882a593Smuzhiyun 		}
2036*4882a593Smuzhiyun 		retval = IRQ_HANDLED;
2037*4882a593Smuzhiyun 	} else {
2038*4882a593Smuzhiyun 		retval = IRQ_NONE;
2039*4882a593Smuzhiyun 	}
2040*4882a593Smuzhiyun 	spin_unlock(&ci->lock);
2041*4882a593Smuzhiyun 
2042*4882a593Smuzhiyun 	return retval;
2043*4882a593Smuzhiyun }
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun /**
2046*4882a593Smuzhiyun  * udc_start: initialize gadget role
2047*4882a593Smuzhiyun  * @ci: chipidea controller
2048*4882a593Smuzhiyun  */
2049*4882a593Smuzhiyun static int udc_start(struct ci_hdrc *ci)
2050*4882a593Smuzhiyun {
2051*4882a593Smuzhiyun 	struct device *dev = ci->dev;
2052*4882a593Smuzhiyun 	struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
2053*4882a593Smuzhiyun 	int retval = 0;
2054*4882a593Smuzhiyun 
2055*4882a593Smuzhiyun 	ci->gadget.ops          = &usb_gadget_ops;
2056*4882a593Smuzhiyun 	ci->gadget.speed        = USB_SPEED_UNKNOWN;
2057*4882a593Smuzhiyun 	ci->gadget.max_speed    = USB_SPEED_HIGH;
2058*4882a593Smuzhiyun 	ci->gadget.name         = ci->platdata->name;
2059*4882a593Smuzhiyun 	ci->gadget.otg_caps	= otg_caps;
2060*4882a593Smuzhiyun 	ci->gadget.sg_supported = 1;
2061*4882a593Smuzhiyun 	ci->gadget.irq		= ci->irq;
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun 	if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
2064*4882a593Smuzhiyun 		ci->gadget.quirk_avoids_skb_reserve = 1;
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun 	if (ci->is_otg && (otg_caps->hnp_support || otg_caps->srp_support ||
2067*4882a593Smuzhiyun 						otg_caps->adp_support))
2068*4882a593Smuzhiyun 		ci->gadget.is_otg = 1;
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ci->gadget.ep_list);
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 	/* alloc resources */
2073*4882a593Smuzhiyun 	ci->qh_pool = dma_pool_create("ci_hw_qh", dev->parent,
2074*4882a593Smuzhiyun 				       sizeof(struct ci_hw_qh),
2075*4882a593Smuzhiyun 				       64, CI_HDRC_PAGE_SIZE);
2076*4882a593Smuzhiyun 	if (ci->qh_pool == NULL)
2077*4882a593Smuzhiyun 		return -ENOMEM;
2078*4882a593Smuzhiyun 
2079*4882a593Smuzhiyun 	ci->td_pool = dma_pool_create("ci_hw_td", dev->parent,
2080*4882a593Smuzhiyun 				       sizeof(struct ci_hw_td),
2081*4882a593Smuzhiyun 				       64, CI_HDRC_PAGE_SIZE);
2082*4882a593Smuzhiyun 	if (ci->td_pool == NULL) {
2083*4882a593Smuzhiyun 		retval = -ENOMEM;
2084*4882a593Smuzhiyun 		goto free_qh_pool;
2085*4882a593Smuzhiyun 	}
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	retval = init_eps(ci);
2088*4882a593Smuzhiyun 	if (retval)
2089*4882a593Smuzhiyun 		goto free_pools;
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 	ci->gadget.ep0 = &ci->ep0in->ep;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	retval = usb_add_gadget_udc(dev, &ci->gadget);
2094*4882a593Smuzhiyun 	if (retval)
2095*4882a593Smuzhiyun 		goto destroy_eps;
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	return retval;
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun destroy_eps:
2100*4882a593Smuzhiyun 	destroy_eps(ci);
2101*4882a593Smuzhiyun free_pools:
2102*4882a593Smuzhiyun 	dma_pool_destroy(ci->td_pool);
2103*4882a593Smuzhiyun free_qh_pool:
2104*4882a593Smuzhiyun 	dma_pool_destroy(ci->qh_pool);
2105*4882a593Smuzhiyun 	return retval;
2106*4882a593Smuzhiyun }
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun /*
2109*4882a593Smuzhiyun  * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
2110*4882a593Smuzhiyun  *
2111*4882a593Smuzhiyun  * No interrupts active, the IRQ has been released
2112*4882a593Smuzhiyun  */
2113*4882a593Smuzhiyun void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
2114*4882a593Smuzhiyun {
2115*4882a593Smuzhiyun 	if (!ci->roles[CI_ROLE_GADGET])
2116*4882a593Smuzhiyun 		return;
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun 	usb_del_gadget_udc(&ci->gadget);
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	destroy_eps(ci);
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	dma_pool_destroy(ci->td_pool);
2123*4882a593Smuzhiyun 	dma_pool_destroy(ci->qh_pool);
2124*4882a593Smuzhiyun }
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun static int udc_id_switch_for_device(struct ci_hdrc *ci)
2127*4882a593Smuzhiyun {
2128*4882a593Smuzhiyun 	if (ci->platdata->pins_device)
2129*4882a593Smuzhiyun 		pinctrl_select_state(ci->platdata->pctl,
2130*4882a593Smuzhiyun 				     ci->platdata->pins_device);
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun 	if (ci->is_otg)
2133*4882a593Smuzhiyun 		/* Clear and enable BSV irq */
2134*4882a593Smuzhiyun 		hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
2135*4882a593Smuzhiyun 					OTGSC_BSVIS | OTGSC_BSVIE);
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun 	return 0;
2138*4882a593Smuzhiyun }
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun static void udc_id_switch_for_host(struct ci_hdrc *ci)
2141*4882a593Smuzhiyun {
2142*4882a593Smuzhiyun 	/*
2143*4882a593Smuzhiyun 	 * The host doesn't care about the B_SESSION_VALID event,
2144*4882a593Smuzhiyun 	 * so clear and disable the BSV irq.
2145*4882a593Smuzhiyun 	 */
2146*4882a593Smuzhiyun 	if (ci->is_otg)
2147*4882a593Smuzhiyun 		hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun 	ci->vbus_active = 0;
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	if (ci->platdata->pins_device && ci->platdata->pins_default)
2152*4882a593Smuzhiyun 		pinctrl_select_state(ci->platdata->pctl,
2153*4882a593Smuzhiyun 				     ci->platdata->pins_default);
2154*4882a593Smuzhiyun }
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun /**
2157*4882a593Smuzhiyun  * ci_hdrc_gadget_init - initialize device related bits
2158*4882a593Smuzhiyun  * @ci: the controller
2159*4882a593Smuzhiyun  *
2160*4882a593Smuzhiyun  * This function initializes the gadget, if the device is "device capable".
2161*4882a593Smuzhiyun  */
2162*4882a593Smuzhiyun int ci_hdrc_gadget_init(struct ci_hdrc *ci)
2163*4882a593Smuzhiyun {
2164*4882a593Smuzhiyun 	struct ci_role_driver *rdrv;
2165*4882a593Smuzhiyun 	int ret;
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
2168*4882a593Smuzhiyun 		return -ENXIO;
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	rdrv = devm_kzalloc(ci->dev, sizeof(*rdrv), GFP_KERNEL);
2171*4882a593Smuzhiyun 	if (!rdrv)
2172*4882a593Smuzhiyun 		return -ENOMEM;
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 	rdrv->start	= udc_id_switch_for_device;
2175*4882a593Smuzhiyun 	rdrv->stop	= udc_id_switch_for_host;
2176*4882a593Smuzhiyun 	rdrv->irq	= udc_irq;
2177*4882a593Smuzhiyun 	rdrv->name	= "gadget";
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	ret = udc_start(ci);
2180*4882a593Smuzhiyun 	if (!ret)
2181*4882a593Smuzhiyun 		ci->roles[CI_ROLE_GADGET] = rdrv;
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	return ret;
2184*4882a593Smuzhiyun }