// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 *
 * Based on dummy_hcd.c, which is:
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 Alan Stern
 */

#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>

#include "vudc.h"

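/*
 * bmRequestType values for standard control requests, grouped by
 * recipient (device, interface, endpoint) and direction.
 */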
#define DEV_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define DEV_INREQUEST	(DEV_REQUEST | USB_DIR_IN)
#define INTF_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define INTF_INREQUEST	(INTF_REQUEST | USB_DIR_IN)
#define EP_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define EP_INREQUEST	(EP_REQUEST | USB_DIR_IN)

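/*
 * Rough per-frame byte budget for each bus speed, used by v_timer() as the
 * amount of data that may be moved in one 1 ms tick; once it is used up,
 * further bulk transfers wait for the next frame. For example, at high
 * speed this is 512 bytes * 13 packets * 8 microframes = 53248 bytes.
 * Returns -1 for an unknown (or not yet negotiated) speed.
 */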
static int get_frame_limit(enum usb_device_speed speed)
{
	switch (speed) {
	case USB_SPEED_LOW:
		return 8 /*bytes*/ * 12 /*packets*/;
	case USB_SPEED_FULL:
		return 64 /*bytes*/ * 19 /*packets*/;
	case USB_SPEED_HIGH:
		return 512 /*bytes*/ * 13 /*packets*/ * 8 /*uframes*/;
	case USB_SPEED_SUPER:
		/* Bus speed is 500000 bytes/ms, so use a little less */
		return 490000;
	default:
		/* error */
		return -1;
	}
}

/*
 * handle_control_request() - handles all control transfers
 * @udc: pointer to vudc
 * @urb: the urb request to handle
 * @setup: pointer to the setup data for a USB device control
 *	   request
 * @status: pointer to request handling status
 *
 * Return 0 - if the request was handled
 *	  1 - if the request wasn't handled
 *	  error code on error
 *
 * Adapted from drivers/usb/gadget/udc/dummy_hcd.c
 */
static int handle_control_request(struct vudc *udc, struct urb *urb,
				  struct usb_ctrlrequest *setup,
				  int *status)
{
	struct vep *ep2;
	int ret_val = 1;
	unsigned int w_index;
	unsigned int w_value;

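	/* Setup packet fields are little-endian on the wire; convert first. */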
	w_index = le16_to_cpu(setup->wIndex);
	w_value = le16_to_cpu(setup->wValue);
	switch (setup->bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (setup->bRequestType != DEV_REQUEST)
			break;
		udc->address = w_value;
		ret_val = 0;
		*status = 0;
		break;
	case USB_REQ_SET_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				udc->gadget.b_hnp_enable = 1;
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				udc->gadget.a_hnp_support = 1;
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				udc->gadget.a_alt_hnp_support = 1;
				break;
			default:
				ret_val = -EOPNOTSUPP;
			}
			if (ret_val == 0) {
				udc->devstatus |= (1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2 || ep2->ep.name == udc->ep[0].ep.name) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			ep2->halted = 1;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				w_value = USB_DEVICE_REMOTE_WAKEUP;
				break;

			case USB_DEVICE_U1_ENABLE:
			case USB_DEVICE_U2_ENABLE:
			case USB_DEVICE_LTM_ENABLE:
				ret_val = -EOPNOTSUPP;
				break;
			default:
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (ret_val == 0) {
				udc->devstatus &= ~(1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2) {
				ret_val = -EOPNOTSUPP;
				break;
			}
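			/*
			 * A wedged endpoint stays halted: the gadget driver
			 * set the wedge so the host cannot clear the halt
			 * with CLEAR_FEATURE(ENDPOINT_HALT).
			 */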
			if (!ep2->wedged)
				ep2->halted = 0;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_GET_STATUS:
		if (setup->bRequestType == DEV_INREQUEST
		    || setup->bRequestType == INTF_INREQUEST
		    || setup->bRequestType == EP_INREQUEST) {
			char *buf;
			/*
			 * device: remote wakeup, selfpowered
			 * interface: nothing
			 * endpoint: halt
			 */
			buf = (char *)urb->transfer_buffer;
			if (urb->transfer_buffer_length > 0) {
				if (setup->bRequestType == EP_INREQUEST) {
					ep2 = vudc_find_endpoint(udc, w_index);
					if (!ep2) {
						ret_val = -EOPNOTSUPP;
						break;
					}
					buf[0] = ep2->halted;
				} else if (setup->bRequestType ==
					   DEV_INREQUEST) {
					buf[0] = (u8)udc->devstatus;
				} else
					buf[0] = 0;
			}
			if (urb->transfer_buffer_length > 1)
				buf[1] = 0;
			urb->actual_length = min_t(u32, 2,
						urb->transfer_buffer_length);
			ret_val = 0;
			*status = 0;
		}
		break;
	}
	return ret_val;
}

/* Adapted from dummy_hcd.c; caller must hold lock */
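/*
 * Move data for one URB between the host-side transfer buffer and the
 * gadget requests queued on @ep, completing requests (and possibly the
 * URB itself) along the way. Returns the number of bytes copied.
 */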
static int transfer(struct vudc *udc,
		    struct urb *urb, struct vep *ep, int limit)
{
	struct vrequest *req;
	int sent = 0;
top:
	/* if there's no request queued, the device is NAKing; return */
	list_for_each_entry(req, &ep->req_queue, req_entry) {
		unsigned int host_len, dev_len, len;
		void *ubuf_pos, *rbuf_pos;
		int is_short, to_host;
		int rescan = 0;

		/*
		 * 1..N packets of ep->ep.maxpacket each ... the last one
		 * may be short (including zero length).
		 *
		 * writer can send a zlp explicitly (length 0) or implicitly
		 * (length mod maxpacket zero, and 'zero' flag); they always
		 * terminate reads.
		 */
		host_len = urb->transfer_buffer_length - urb->actual_length;
		dev_len = req->req.length - req->req.actual;
		len = min(host_len, dev_len);

		to_host = usb_pipein(urb->pipe);
		if (unlikely(len == 0))
			is_short = 1;
		else {
			/* send multiple of maxpacket first, then remainder */
			if (len >= ep->ep.maxpacket) {
				is_short = 0;
				if (len % ep->ep.maxpacket > 0)
					rescan = 1;
				len -= len % ep->ep.maxpacket;
			} else {
				is_short = 1;
			}

			ubuf_pos = urb->transfer_buffer + urb->actual_length;
			rbuf_pos = req->req.buf + req->req.actual;

			if (urb->pipe & USB_DIR_IN)
				memcpy(ubuf_pos, rbuf_pos, len);
			else
				memcpy(rbuf_pos, ubuf_pos, len);

			urb->actual_length += len;
			req->req.actual += len;
			sent += len;
		}

		/*
		 * short packets terminate, maybe with overflow/underflow.
		 * it's only really an error to write too much.
		 *
		 * partially filling a buffer optionally blocks queue advances
		 * (so completion handlers can clean up the queue) but we don't
		 * need to emulate such data-in-flight.
		 */
		if (is_short) {
			if (host_len == dev_len) {
				req->req.status = 0;
				urb->status = 0;
			} else if (to_host) {
				req->req.status = 0;
				if (dev_len > host_len)
					urb->status = -EOVERFLOW;
				else
					urb->status = 0;
			} else {
				urb->status = 0;
				if (host_len > dev_len)
					req->req.status = -EOVERFLOW;
				else
					req->req.status = 0;
			}

		/* many requests terminate without a short packet */
		/* also check if we need to send zlp */
		} else {
			if (req->req.length == req->req.actual) {
				if (req->req.zero && to_host)
					rescan = 1;
				else
					req->req.status = 0;
			}
			if (urb->transfer_buffer_length == urb->actual_length) {
				if (urb->transfer_flags & URB_ZERO_PACKET &&
				    !to_host)
					rescan = 1;
				else
					urb->status = 0;
			}
		}

		/* device side completion --> continuable */
		if (req->req.status != -EINPROGRESS) {

			list_del_init(&req->req_entry);
			spin_unlock(&udc->lock);
			usb_gadget_giveback_request(&ep->ep, &req->req);
			spin_lock(&udc->lock);

			/* requests might have been unlinked... */
			rescan = 1;
		}

		/* host side completion --> terminate */
		if (urb->status != -EINPROGRESS)
			break;

		/* rescan to continue with any other queued i/o */
		if (rescan)
			goto top;
	}
	return sent;
}

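/*
 * Transfer timer callback: models a 1 ms USB frame. It walks the pending
 * urb_queue, runs control transfers through handle_control_request() and
 * the gadget driver's setup() callback, moves data with transfer() while
 * charging it against the frame's byte budget, and hands finished or
 * unlinked URBs to the tx side for the RET_SUBMIT/RET_UNLINK reply.
 */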
static void v_timer(struct timer_list *t)
{
	struct vudc *udc = from_timer(udc, t, tr_timer.timer);
	struct transfer_timer *timer = &udc->tr_timer;
	struct urbp *urb_p, *tmp;
	unsigned long flags;
	struct usb_ep *_ep;
	struct vep *ep;
	int ret = 0;
	int total, limit;

	spin_lock_irqsave(&udc->lock, flags);

	total = get_frame_limit(udc->gadget.speed);
	if (total < 0) {	/* unknown speed, or not set yet */
		timer->state = VUDC_TR_IDLE;
		spin_unlock_irqrestore(&udc->lock, flags);
		return;
	}
	/* is it next frame now? */
	if (time_after(jiffies, timer->frame_start + msecs_to_jiffies(1))) {
		timer->frame_limit = total;
		/* FIXME: how to make it accurate? */
		timer->frame_start = jiffies;
	} else {
		total = timer->frame_limit;
	}

	/* We have to clear ep0 flags separately as it's not on the list */
	udc->ep[0].already_seen = 0;
	list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
		ep = to_vep(_ep);
		ep->already_seen = 0;
	}

restart:
	list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
		struct urb *urb = urb_p->urb;

		ep = urb_p->ep;
		if (urb->unlinked)
			goto return_urb;
		if (timer->state != VUDC_TR_RUNNING)
			continue;

		if (!ep) {
			urb->status = -EPROTO;
			goto return_urb;
		}

		/* Used up bandwidth? */
		if (total <= 0 && ep->type == USB_ENDPOINT_XFER_BULK)
			continue;

		if (ep->already_seen)
			continue;
		ep->already_seen = 1;
		if (ep == &udc->ep[0] && urb_p->new) {
			ep->setup_stage = 1;
			urb_p->new = 0;
		}
		if (ep->halted && !ep->setup_stage) {
			urb->status = -EPIPE;
			goto return_urb;
		}

		if (ep == &udc->ep[0] && ep->setup_stage) {
			/* TODO - flush any stale requests */
			ep->setup_stage = 0;
			ep->halted = 0;

			ret = handle_control_request(udc, urb,
				(struct usb_ctrlrequest *) urb->setup_packet,
				(&urb->status));
			if (ret > 0) {
				spin_unlock(&udc->lock);
				ret = udc->driver->setup(&udc->gadget,
					(struct usb_ctrlrequest *)
					urb->setup_packet);
				spin_lock(&udc->lock);
			}
			if (ret >= 0) {
				/* no delays (max 64kb data stage) */
				limit = 64 * 1024;
				goto treat_control_like_bulk;
			} else {
				urb->status = -EPIPE;
				urb->actual_length = 0;
				goto return_urb;
			}
		}

		limit = total;
		switch (ep->type) {
		case USB_ENDPOINT_XFER_ISOC:
			/* TODO: support */
			urb->status = -EXDEV;
			break;

		case USB_ENDPOINT_XFER_INT:
			/*
			 * TODO: figure out bandwidth guarantees
			 * for now, give unlimited bandwidth
			 */
			limit += urb->transfer_buffer_length;
			fallthrough;
		default:
treat_control_like_bulk:
			total -= transfer(udc, urb, ep, limit);
		}
		if (urb->status == -EINPROGRESS)
			continue;

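/*
 * Done with this URB: completed, failed, or unlinked. Take it off the
 * queue and wake the tx thread to send the USB/IP reply.
 */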
return_urb:
		if (ep)
			ep->already_seen = ep->setup_stage = 0;

		spin_lock(&udc->lock_tx);
		list_del(&urb_p->urb_entry);
		if (!urb->unlinked) {
			v_enqueue_ret_submit(udc, urb_p);
		} else {
			v_enqueue_ret_unlink(udc, urb_p->seqnum,
					     urb->unlinked);
			free_urbp_and_urb(urb_p);
		}
		wake_up(&udc->tx_waitq);
		spin_unlock(&udc->lock_tx);

		goto restart;
	}

	/* TODO - also wait on empty usb_request queues? */
	if (list_empty(&udc->urb_queue))
		timer->state = VUDC_TR_IDLE;
	else
		mod_timer(&timer->timer,
			  timer->frame_start + msecs_to_jiffies(1));

	spin_unlock_irqrestore(&udc->lock, flags);
}

/* All timer functions are run with udc->lock held */
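/*
 * v_start_timer() arms the timer, v_kick_timer() (re)schedules it and,
 * when idle, marks it RUNNING, and v_stop_timer() only flips the state
 * back to STOPPED; the handler itself takes care of actually stopping.
 */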

void v_init_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	timer_setup(&t->timer, v_timer, 0);
	t->state = VUDC_TR_STOPPED;
}

void v_start_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer start");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		return v_kick_timer(udc, jiffies);
	case VUDC_TR_STOPPED:
		t->state = VUDC_TR_IDLE;
		t->frame_start = jiffies;
		t->frame_limit = get_frame_limit(udc->gadget.speed);
		return v_kick_timer(udc, jiffies);
	}
}

void v_kick_timer(struct vudc *udc, unsigned long time)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer kick");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		t->state = VUDC_TR_RUNNING;
		fallthrough;
	case VUDC_TR_STOPPED:
		/* we may want to kick timer to unqueue urbs */
		mod_timer(&t->timer, time);
	}
}

void v_stop_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	/* timer itself will take care of stopping */
	dev_dbg(&udc->pdev->dev, "timer stop");
	t->state = VUDC_TR_STOPPED;
}