xref: /rk3399_rockchip-uboot/drivers/usb/gadget/ci_udc.c (revision d7beeb9358a93e2dfd01e0ab5ff4317ce106c4d7)
1 /*
2  * Copyright 2011, Marvell Semiconductor Inc.
3  * Lei Wen <leiwen@marvell.com>
4  *
5  * SPDX-License-Identifier:	GPL-2.0+
6  *
7  * Back ported to the 8xx platform (from the 8260 platform) by
8  * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
9  */
10 
11 #include <common.h>
12 #include <command.h>
13 #include <config.h>
14 #include <net.h>
15 #include <malloc.h>
16 #include <asm/byteorder.h>
17 #include <asm/errno.h>
18 #include <asm/io.h>
19 #include <asm/unaligned.h>
20 #include <linux/types.h>
21 #include <linux/usb/ch9.h>
22 #include <linux/usb/gadget.h>
23 #include <usb/ci_udc.h>
24 #include "../host/ehci.h"
25 #include "ci_udc.h"
26 
27 /*
28  * Check if the system has cachelines that are too long. If a cacheline is
29  * longer than 128 bytes, the driver will not be able to flush/invalidate
30  * the data cache over separate QH entries. We use 128 bytes because one QH
31  * entry is 64 bytes long and there are always two QH list entries per endpoint.
32  */
33 #if ARCH_DMA_MINALIGN > 128
34 #error This driver cannot work on systems with cachelines longer than 128 bytes
35 #endif
36 
37 #ifndef DEBUG
38 #define DBG(x...) do {} while (0)
39 #else
40 #define DBG(x...) printf(x)
41 static const char *reqname(unsigned r)
42 {
43 	switch (r) {
44 	case USB_REQ_GET_STATUS: return "GET_STATUS";
45 	case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
46 	case USB_REQ_SET_FEATURE: return "SET_FEATURE";
47 	case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
48 	case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
49 	case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
50 	case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
51 	case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
52 	case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
53 	case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
54 	default: return "*UNKNOWN*";
55 	}
56 }
57 #endif
58 
59 static struct usb_endpoint_descriptor ep0_desc = {
60 	.bLength = sizeof(struct usb_endpoint_descriptor),
61 	.bDescriptorType = USB_DT_ENDPOINT,
62 	.bEndpointAddress = USB_DIR_IN,
63 	.bmAttributes =	USB_ENDPOINT_XFER_CONTROL,
64 };
65 
66 static int ci_pullup(struct usb_gadget *gadget, int is_on);
67 static int ci_ep_enable(struct usb_ep *ep,
68 		const struct usb_endpoint_descriptor *desc);
69 static int ci_ep_disable(struct usb_ep *ep);
70 static int ci_ep_queue(struct usb_ep *ep,
71 		struct usb_request *req, gfp_t gfp_flags);
72 static struct usb_request *
73 ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
74 static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);
75 
76 static struct usb_gadget_ops ci_udc_ops = {
77 	.pullup = ci_pullup,
78 };
79 
80 static struct usb_ep_ops ci_ep_ops = {
81 	.enable         = ci_ep_enable,
82 	.disable        = ci_ep_disable,
83 	.queue          = ci_ep_queue,
84 	.alloc_request  = ci_ep_alloc_request,
85 	.free_request   = ci_ep_free_request,
86 };
87 
88 /* Init values for USB endpoints. */
89 static const struct usb_ep ci_ep_init[2] = {
90 	[0] = {	/* EP 0 */
91 		.maxpacket	= 64,
92 		.name		= "ep0",
93 		.ops		= &ci_ep_ops,
94 	},
95 	[1] = {	/* EP 1..n */
96 		.maxpacket	= 512,
97 		.name		= "ep-",
98 		.ops		= &ci_ep_ops,
99 	},
100 };
101 
102 static struct ci_drv controller = {
103 	.gadget	= {
104 		.name	= "ci_udc",
105 		.ops	= &ci_udc_ops,
106 		.is_dualspeed = 1,
107 	},
108 };
109 
110 /**
111  * ci_get_qh() - return queue head for endpoint
112  * @ep_num:	Endpoint number
113  * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
114  *
115  * This function returns the QH associated with a particular endpoint
116  * and its direction.
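 *
 * For example, with this layout ep1 OUT maps to controller.epts[2] and
 * ep1 IN maps to controller.epts[3].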
117  */
118 static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
119 {
120 	return &controller.epts[(ep_num * 2) + dir_in];
121 }
122 
123 /**
124  * ci_get_qtd() - return queue item for endpoint
125  * @ep_num:	Endpoint number
126  * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
127  *
128  * This function returns the queue item (qTD) associated with a particular
129  * endpoint and its direction.
130  */
131 static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
132 {
133 	return controller.items[(ep_num * 2) + dir_in];
134 }
135 
136 /**
137  * ci_flush_qh - flush cache over queue head
138  * @ep_num:	Endpoint number
139  *
140  * This function flushes the data cache over the QH pair of an endpoint.
141  */
142 static void ci_flush_qh(int ep_num)
143 {
144 	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
145 	const uint32_t start = (uint32_t)head;
146 	const uint32_t end = start + 2 * sizeof(*head);
147 
148 	flush_dcache_range(start, end);
149 }
150 
151 /**
152  * ci_invalidate_qh - invalidate cache over queue head
153  * @ep_num:	Endpoint number
154  *
155  * This function invalidates the data cache over the QH pair of an endpoint.
156  */
157 static void ci_invalidate_qh(int ep_num)
158 {
159 	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
160 	uint32_t start = (uint32_t)head;
161 	uint32_t end = start + 2 * sizeof(*head);
162 
163 	invalidate_dcache_range(start, end);
164 }
165 
166 /**
167  * ci_flush_qtd - flush cache over queue item
168  * @ep_num:	Endpoint number
169  *
170  * This function flushes the data cache over the qTD pair of an endpoint.
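 * Unlike the QH pair, the qTD pair is not guaranteed to span whole
 * cachelines, so the end address is rounded up to ARCH_DMA_MINALIGN.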
171  */
172 static void ci_flush_qtd(int ep_num)
173 {
174 	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
175 	const uint32_t start = (uint32_t)item;
176 	const uint32_t end_raw = start + 2 * sizeof(*item);
177 	const uint32_t end = roundup(end_raw, ARCH_DMA_MINALIGN);
178 
179 	flush_dcache_range(start, end);
180 }
181 
182 /**
183  * ci_invalidate_qtd - invalidate cache over queue item
184  * @ep_num:	Endpoint number
185  *
186  * This function invalidates the data cache over the qTD pair of an endpoint.
187  */
188 static void ci_invalidate_qtd(int ep_num)
189 {
190 	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
191 	const uint32_t start = (uint32_t)item;
192 	const uint32_t end_raw = start + 2 * sizeof(*item);
193 	const uint32_t end = roundup(end_raw, ARCH_DMA_MINALIGN);
194 
195 	invalidate_dcache_range(start, end);
196 }
197 
198 static struct usb_request *
199 ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
200 {
201 	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
202 	int num;
203 	struct ci_req *ci_req;
204 
205 	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
206 	if (num == 0 && controller.ep0_req)
207 		return &controller.ep0_req->req;
208 
209 	ci_req = memalign(ARCH_DMA_MINALIGN, sizeof(*ci_req));
210 	if (!ci_req)
211 		return NULL;
212 
213 	INIT_LIST_HEAD(&ci_req->queue);
214 	ci_req->b_buf = 0;
215 
216 	if (num == 0)
217 		controller.ep0_req = ci_req;
218 
219 	return &ci_req->req;
220 }
221 
222 static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
223 {
224 	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
225 	struct ci_req *ci_req = container_of(req, struct ci_req, req);
226 	int num;
227 
228 	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
229 	if (num == 0) {
230 		if (!controller.ep0_req)
231 			return;
232 		controller.ep0_req = 0;
233 	}
234 
235 	if (ci_req->b_buf)
236 		free(ci_req->b_buf);
237 	free(ci_req);
238 }
239 
240 static void ep_enable(int num, int in, int maxpacket)
241 {
242 	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
243 	unsigned n;
244 
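	/*
	 * Enable the endpoint in the requested direction as a bulk endpoint
	 * and reset its data toggle (CTRL_TXR/CTRL_RXR).
	 */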
245 	n = readl(&udc->epctrl[num]);
246 	if (in)
247 		n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
248 	else
249 		n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);
250 
251 	if (num != 0) {
252 		struct ept_queue_head *head = ci_get_qh(num, in);
253 
254 		head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
255 		ci_flush_qh(num);
256 	}
257 	writel(n, &udc->epctrl[num]);
258 }
259 
260 static int ci_ep_enable(struct usb_ep *ep,
261 		const struct usb_endpoint_descriptor *desc)
262 {
263 	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
264 	int num, in;
265 	num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
266 	in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
267 	ci_ep->desc = desc;
268 
269 	if (num) {
270 		int max = get_unaligned_le16(&desc->wMaxPacketSize);
271 
272 		if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
273 			max = 64;
274 		if (ep->maxpacket != max) {
275 			DBG("%s: from %d to %d\n", __func__,
276 			    ep->maxpacket, max);
277 			ep->maxpacket = max;
278 		}
279 	}
280 	ep_enable(num, in, ep->maxpacket);
281 	DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);
282 	return 0;
283 }
284 
285 static int ci_ep_disable(struct usb_ep *ep)
286 {
287 	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
288 
289 	ci_ep->desc = NULL;
290 	return 0;
291 }
292 
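/*
 * ci_bounce() - make a request's buffer safe for DMA
 *
 * If req->buf and req->length are already cacheline aligned, the buffer is
 * used directly; otherwise the data is staged through a per-request bounce
 * buffer that is (re)allocated as needed. In both cases the region handed
 * to the hardware is flushed from the data cache.
 */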
293 static int ci_bounce(struct ci_req *ci_req, int in)
294 {
295 	struct usb_request *req = &ci_req->req;
296 	uint32_t addr = (uint32_t)req->buf;
297 	uint32_t hwaddr;
298 	uint32_t aligned_used_len;
299 
300 	/* Input buffer address is not aligned. */
301 	if (addr & (ARCH_DMA_MINALIGN - 1))
302 		goto align;
303 
304 	/* Input buffer length is not aligned. */
305 	if (req->length & (ARCH_DMA_MINALIGN - 1))
306 		goto align;
307 
308 	/* The buffer is well aligned, only flush cache. */
309 	ci_req->hw_len = req->length;
310 	ci_req->hw_buf = req->buf;
311 	goto flush;
312 
313 align:
314 	if (ci_req->b_buf && req->length > ci_req->b_len) {
315 		free(ci_req->b_buf);
316 		ci_req->b_buf = 0;
317 	}
318 	if (!ci_req->b_buf) {
319 		ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
320 		ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
321 		if (!ci_req->b_buf)
322 			return -ENOMEM;
323 	}
324 	ci_req->hw_len = ci_req->b_len;
325 	ci_req->hw_buf = ci_req->b_buf;
326 
327 	if (in)
328 		memcpy(ci_req->hw_buf, req->buf, req->length);
329 
330 flush:
331 	hwaddr = (uint32_t)ci_req->hw_buf;
332 	aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
333 	flush_dcache_range(hwaddr, hwaddr + aligned_used_len);
334 
335 	return 0;
336 }
337 
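/*
 * ci_debounce() - undo ci_bounce() once a transfer has completed
 *
 * For OUT transfers the hardware buffer is invalidated in the data cache
 * and, if a bounce buffer was used, the received bytes are copied back into
 * the caller's original buffer. IN transfers need no post-processing.
 */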
338 static void ci_debounce(struct ci_req *ci_req, int in)
339 {
340 	struct usb_request *req = &ci_req->req;
341 	uint32_t addr = (uint32_t)req->buf;
342 	uint32_t hwaddr = (uint32_t)ci_req->hw_buf;
343 	uint32_t aligned_used_len;
344 
345 	if (in)
346 		return;
347 
348 	aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
349 	invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);
350 
351 	if (addr == hwaddr)
352 		return; /* not a bounce */
353 
354 	memcpy(req->buf, ci_req->hw_buf, req->actual);
355 }
356 
357 static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
358 {
359 	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
360 	struct ept_queue_item *item;
361 	struct ept_queue_head *head;
362 	int bit, num, len, in;
363 	struct ci_req *ci_req;
364 
365 	ci_ep->req_primed = true;
366 
367 	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
368 	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
369 	item = ci_get_qtd(num, in);
370 	head = ci_get_qh(num, in);
371 
372 	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
373 	len = ci_req->req.length;
374 
375 	item->info = INFO_BYTES(len) | INFO_ACTIVE;
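	/*
	 * page0 points at the start of the transfer buffer; the remaining
	 * page pointers cover the following 4 KiB pages so that a single
	 * qTD can cross page boundaries within the buffer.
	 */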
376 	item->page0 = (uint32_t)ci_req->hw_buf;
377 	item->page1 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x1000;
378 	item->page2 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x2000;
379 	item->page3 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x3000;
380 	item->page4 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x4000;
381 
382 	head->next = (unsigned) item;
383 	head->info = 0;
384 
385 	/*
386 	 * When sending the data for an IN transaction, the attached host
387 	 * knows that all data for the IN is sent when one of the following
388 	 * occurs:
389 	 * a) A zero-length packet is transmitted.
390 	 * b) A packet with length that isn't an exact multiple of the ep's
391 	 *    maxpacket is transmitted.
392 	 * c) Enough data is sent to exactly fill the host's maximum expected
393 	 *    IN transaction size.
394 	 *
395 	 * One of these conditions MUST apply at the end of an IN transaction,
396 	 * or the transaction will not be considered complete by the host. If
397 	 * none of (a)..(c) already applies, then we must force (a) to apply
398 	 * by explicitly sending an extra zero-length packet.
399 	 */
400 	/*  IN    !a     !b                              !c */
401 	if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
402 		/*
403		 * Each endpoint has 2 items allocated, even though typically
404		 * only 1 is used at a time, since either an IN or an OUT
405		 * transfer (but not both) is queued. For an IN transaction,
406		 * item currently points at the second of these items, so
407		 * (item - 1) can be used to transmit the extra zero-length packet.
408 		 */
409 		item->next = (unsigned)(item - 1);
410 		item--;
411 		item->info = INFO_ACTIVE;
412 	}
413 
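	/* Terminate the qTD list and request an interrupt on completion. */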
414 	item->next = TERMINATE;
415 	item->info |= INFO_IOC;
416 
417 	ci_flush_qtd(num);
418 
419 	DBG("ept%d %s queue len %x, req %p, buffer %p\n",
420 	    num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
421 	ci_flush_qh(num);
422 
423 	if (in)
424 		bit = EPT_TX(num);
425 	else
426 		bit = EPT_RX(num);
427 
428 	writel(bit, &udc->epprime);
429 }
430 
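/*
 * ci_ep_queue() - queue a usb_request on an endpoint
 *
 * The request is bounced/flushed for DMA, appended to the endpoint's queue
 * and, if nothing is currently primed on the endpoint, handed to the
 * hardware right away. A typical gadget-side call sequence looks like the
 * sketch below ("buffer", "len" and "my_complete" are illustrative names):
 *
 *	req = usb_ep_alloc_request(ep, 0);
 *	req->buf = buffer;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, 0);
 */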
431 static int ci_ep_queue(struct usb_ep *ep,
432 		struct usb_request *req, gfp_t gfp_flags)
433 {
434 	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
435 	struct ci_req *ci_req = container_of(req, struct ci_req, req);
436 	int in, ret;
437 	int __maybe_unused num;
438 
439 	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
440 	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
441 
442 	if (!num && ci_ep->req_primed) {
443 		/*
444 		 * The flipping of ep0 between IN and OUT relies on
445 		 * ci_ep_queue consuming the current IN/OUT setting
446 		 * immediately. If this is deferred to a later point when the
447 		 * req is pulled out of ci_req->queue, then the IN/OUT setting
448 		 * may have been changed since the req was queued, and state
449 		 * will get out of sync. This condition doesn't occur today,
450 		 * but could if bugs were introduced later, and this error
451 		 * check will save a lot of debugging time.
452 		 */
453 		printf("%s: ep0 transaction already in progress\n", __func__);
454 		return -EPROTO;
455 	}
456 
457 	ret = ci_bounce(ci_req, in);
458 	if (ret)
459 		return ret;
460 
461 	DBG("ept%d %s pre-queue req %p, buffer %p\n",
462 	    num, in ? "in" : "out", ci_req, ci_req->hw_buf);
463 	list_add_tail(&ci_req->queue, &ci_ep->queue);
464 
465 	if (!ci_ep->req_primed)
466 		ci_ep_submit_next_request(ci_ep);
467 
468 	return 0;
469 }
470 
471 static void flip_ep0_direction(void)
472 {
473 	if (ep0_desc.bEndpointAddress == USB_DIR_IN) {
474 		DBG("%s: Flipping ep0 to OUT\n", __func__);
475 		ep0_desc.bEndpointAddress = 0;
476 	} else {
477 		DBG("%s: Flipping ep0 to IN\n", __func__);
478 		ep0_desc.bEndpointAddress = USB_DIR_IN;
479 	}
480 }
481 
482 static void handle_ep_complete(struct ci_ep *ep)
483 {
484 	struct ept_queue_item *item;
485 	int num, in, len;
486 	struct ci_req *ci_req;
487 
488 	num = ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
489 	in = (ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
490 	item = ci_get_qtd(num, in);
491 	ci_invalidate_qtd(num);
492 
493 	len = (item->info >> 16) & 0x7fff;
494 	if (item->info & 0xff)
495 		printf("EP%d/%s FAIL info=%x pg0=%x\n",
496 		       num, in ? "in" : "out", item->info, item->page0);
497 
498 	ci_req = list_first_entry(&ep->queue, struct ci_req, queue);
499 	list_del_init(&ci_req->queue);
500 	ep->req_primed = false;
501 
502 	if (!list_empty(&ep->queue))
503 		ci_ep_submit_next_request(ep);
504 
505 	ci_req->req.actual = ci_req->req.length - len;
506 	ci_debounce(ci_req, in);
507 
508 	DBG("ept%d %s req %p, complete %x\n",
509 	    num, in ? "in" : "out", ci_req, len);
510 	if (num != 0 || controller.ep0_data_phase)
511 		ci_req->req.complete(&ep->ep, &ci_req->req);
512 	if (num == 0 && controller.ep0_data_phase) {
513 		/*
514 		 * Data Stage is complete, so flip ep0 dir for Status Stage,
515 		 * which always transfers a packet in the opposite direction.
516 		 */
517 		DBG("%s: flip ep0 dir for Status Stage\n", __func__);
518 		flip_ep0_direction();
519 		controller.ep0_data_phase = false;
520 		ci_req->req.length = 0;
521 		usb_ep_queue(&ep->ep, &ci_req->req, 0);
522 	}
523 }
524 
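/* Pack bRequestType and bRequest into a single value for the switch below. */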
525 #define SETUP(type, request) (((type) << 8) | (request))
526 
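/*
 * handle_setup() - decode a Setup packet received on ep0
 *
 * CLEAR_FEATURE(endpoint), SET_ADDRESS and GET_STATUS(device) are answered
 * directly here; every other request is passed to the gadget driver's
 * setup() callback, and ep0 is stalled if that callback fails.
 */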
527 static void handle_setup(void)
528 {
529 	struct ci_ep *ci_ep = &controller.ep[0];
530 	struct ci_req *ci_req;
531 	struct usb_request *req;
532 	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
533 	struct ept_queue_head *head;
534 	struct usb_ctrlrequest r;
535 	int status = 0;
536 	int num, in, _num, _in, i;
537 	char *buf;
538 
539 	ci_req = controller.ep0_req;
540 	req = &ci_req->req;
541 	head = ci_get_qh(0, 0);	/* EP0 OUT */
542 
543 	ci_invalidate_qh(0);
544 	memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
545 #ifdef CONFIG_CI_UDC_HAS_HOSTPC
546 	writel(EPT_RX(0), &udc->epsetupstat);
547 #else
548 	writel(EPT_RX(0), &udc->epstat);
549 #endif
550 	DBG("handle setup %s, %x, %x index %x value %x length %x\n",
551 	    reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
552 	    r.wValue, r.wLength);
553 
554 	/* Set EP0 dir for Data Stage based on Setup Stage data */
555 	if (r.bRequestType & USB_DIR_IN) {
556 		DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
557 		ep0_desc.bEndpointAddress = USB_DIR_IN;
558 	} else {
559 		DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
560 		ep0_desc.bEndpointAddress = 0;
561 	}
562 	if (r.wLength) {
563 		controller.ep0_data_phase = true;
564 	} else {
565 		/* 0 length -> no Data Stage. Flip dir for Status Stage */
566 		DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
567 		flip_ep0_direction();
568 		controller.ep0_data_phase = false;
569 	}
570 
571 	list_del_init(&ci_req->queue);
572 	ci_ep->req_primed = false;
573 
574 	switch (SETUP(r.bRequestType, r.bRequest)) {
575 	case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
576 		_num = r.wIndex & 15;
577 		_in = !!(r.wIndex & 0x80);
578 
579 		if ((r.wValue == 0) && (r.wLength == 0)) {
580 			req->length = 0;
581 			for (i = 0; i < NUM_ENDPOINTS; i++) {
582 				struct ci_ep *ep = &controller.ep[i];
583 
584 				if (!ep->desc)
585 					continue;
586 				num = ep->desc->bEndpointAddress
587 						& USB_ENDPOINT_NUMBER_MASK;
588 				in = (ep->desc->bEndpointAddress
589 						& USB_DIR_IN) != 0;
590 				if ((num == _num) && (in == _in)) {
591 					ep_enable(num, in, ep->ep.maxpacket);
592 					usb_ep_queue(controller.gadget.ep0,
593 							req, 0);
594 					break;
595 				}
596 			}
597 		}
598 		return;
599 
600 	case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
601 		/*
602		 * Write the address as "delayed": it takes effect
603		 * after the next IN transaction (the Status Stage).
604 		 */
605 		writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
606 		req->length = 0;
607 		usb_ep_queue(controller.gadget.ep0, req, 0);
608 		return;
609 
610 	case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
611 		req->length = 2;
612 		buf = (char *)req->buf;
613 		buf[0] = 1 << USB_DEVICE_SELF_POWERED;
614 		buf[1] = 0;
615 		usb_ep_queue(controller.gadget.ep0, req, 0);
616 		return;
617 	}
618 	/* pass request up to the gadget driver */
619 	if (controller.driver)
620 		status = controller.driver->setup(&controller.gadget, &r);
621 	else
622 		status = -ENODEV;
623 
624 	if (!status)
625 		return;
626 	DBG("STALL reqname %s type %x value %x, index %x\n",
627 	    reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
628 	writel((1<<16) | (1 << 0), &udc->epctrl[0]);
629 }
630 
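/*
 * stop_activity() - quiesce the controller, e.g. after a USB bus reset
 *
 * Pending completion, setup and status bits are acknowledged, all endpoint
 * FIFOs are flushed and the non-control endpoints are disabled.
 */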
631 static void stop_activity(void)
632 {
633 	int i, num, in;
634 	struct ept_queue_head *head;
635 	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
636 	writel(readl(&udc->epcomp), &udc->epcomp);
637 #ifdef CONFIG_CI_UDC_HAS_HOSTPC
638 	writel(readl(&udc->epsetupstat), &udc->epsetupstat);
639 #endif
640 	writel(readl(&udc->epstat), &udc->epstat);
641 	writel(0xffffffff, &udc->epflush);
642 
643 	/* error out any pending reqs */
644 	for (i = 0; i < NUM_ENDPOINTS; i++) {
645 		if (i != 0)
646 			writel(0, &udc->epctrl[i]);
647 		if (controller.ep[i].desc) {
648 			num = controller.ep[i].desc->bEndpointAddress
649 				& USB_ENDPOINT_NUMBER_MASK;
650 			in = (controller.ep[i].desc->bEndpointAddress
651 				& USB_DIR_IN) != 0;
652 			head = ci_get_qh(num, in);
653 			head->info = INFO_ACTIVE;
654 			ci_flush_qh(num);
655 		}
656 	}
657 }
658 
659 void udc_irq(void)
660 {
661 	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
662 	unsigned n = readl(&udc->usbsts);
663 	int bit, i, num, in;
664 	writel(n, &udc->usbsts);
665 
666 	n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
667 	if (n == 0)
668 		return;
669 
670 	if (n & STS_URI) {
671 		DBG("-- reset --\n");
672 		stop_activity();
673 	}
674 	if (n & STS_SLI)
675 		DBG("-- suspend --\n");
676 
677 	if (n & STS_PCI) {
678 		int max = 64;
679 		int speed = USB_SPEED_FULL;
680 
681 #ifdef CONFIG_CI_UDC_HAS_HOSTPC
682 		bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
683 #else
684 		bit = (readl(&udc->portsc) >> 26) & 3;
685 #endif
686 		DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
687 		if (bit == 2) {
688 			speed = USB_SPEED_HIGH;
689 			max = 512;
690 		}
691 		controller.gadget.speed = speed;
692 		for (i = 1; i < NUM_ENDPOINTS; i++) {
693 			if (controller.ep[i].ep.maxpacket > max)
694 				controller.ep[i].ep.maxpacket = max;
695 		}
696 	}
697 
698 	if (n & STS_UEI)
699 		printf("<UEI %x>\n", readl(&udc->epcomp));
700 
701 	if ((n & STS_UI) || (n & STS_UEI)) {
702 #ifdef CONFIG_CI_UDC_HAS_HOSTPC
703 		n = readl(&udc->epsetupstat);
704 #else
705 		n = readl(&udc->epstat);
706 #endif
707 		if (n & EPT_RX(0))
708 			handle_setup();
709 
710 		n = readl(&udc->epcomp);
711 		if (n != 0)
712 			writel(n, &udc->epcomp);
713 
714 		for (i = 0; i < NUM_ENDPOINTS && n; i++) {
715 			if (controller.ep[i].desc) {
716 				num = controller.ep[i].desc->bEndpointAddress
717 					& USB_ENDPOINT_NUMBER_MASK;
718 				in = (controller.ep[i].desc->bEndpointAddress
719 						& USB_DIR_IN) != 0;
720 				bit = (in) ? EPT_TX(num) : EPT_RX(num);
721 				if (n & bit)
722 					handle_ep_complete(&controller.ep[i]);
723 			}
724 		}
725 	}
726 }
727 
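/*
 * usb_gadget_handle_interrupts() - poll the controller for pending events
 *
 * U-Boot drives the gadget stack by polling rather than from a hardware
 * interrupt, so callers invoke this in a loop; any pending status bits are
 * serviced by udc_irq().
 */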
728 int usb_gadget_handle_interrupts(void)
729 {
730 	u32 value;
731 	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
732 
733 	value = readl(&udc->usbsts);
734 	if (value)
735 		udc_irq();
736 
737 	return value;
738 }
739 
740 void udc_disconnect(void)
741 {
742 	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
743 	/* disable pullup */
744 	stop_activity();
745 	writel(USBCMD_FS2, &udc->usbcmd);
746 	udelay(800);
747 	if (controller.driver)
748 		controller.driver->disconnect(&controller.gadget);
749 }
750 
751 static int ci_pullup(struct usb_gadget *gadget, int is_on)
752 {
753 	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
754 	if (is_on) {
755 		/* RESET */
756 		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
757 		udelay(200);
758 
759 		writel((unsigned)controller.epts, &udc->epinitaddr);
760 
761 		/* select DEVICE mode */
762 		writel(USBMODE_DEVICE, &udc->usbmode);
763 
764 		writel(0xffffffff, &udc->epflush);
765 
766 		/* Turn on the USB connection by enabling the pullup resistor */
767 		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd);
768 	} else {
769 		udc_disconnect();
770 	}
771 
772 	return 0;
773 }
774 
775 static int ci_udc_probe(void)
776 {
777 	struct ept_queue_head *head;
778 	uint8_t *imem;
779 	int i;
780 
781 	const int num = 2 * NUM_ENDPOINTS;
782 
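	/*
	 * Sizing below: 2 * NUM_ENDPOINTS queue heads (one OUT and one IN
	 * head per endpoint) plus one cacheline-padded qTD pair per endpoint.
	 */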
783 	const int eplist_min_align = 4096;
784 	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
785 	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
786 	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);
787 
788 	const int ilist_align = roundup(ARCH_DMA_MINALIGN, 32);
789 	const int ilist_ent_raw_sz = 2 * sizeof(struct ept_queue_item);
790 	const int ilist_ent_sz = roundup(ilist_ent_raw_sz, ARCH_DMA_MINALIGN);
791 	const int ilist_sz = NUM_ENDPOINTS * ilist_ent_sz;
792 
793 	/* The QH list must be aligned to 4096 bytes. */
794 	controller.epts = memalign(eplist_align, eplist_sz);
795 	if (!controller.epts)
796 		return -ENOMEM;
797 	memset(controller.epts, 0, eplist_sz);
798 
799 	/*
800	 * Each qTD item must be 32-byte aligned, and each qTD pair must be
801	 * cacheline aligned. There are two qTD items for each endpoint and
802	 * only one of them is used for the endpoint at a time, so we can group
803 	 * them together.
804 	 */
805 	controller.items_mem = memalign(ilist_align, ilist_sz);
806 	if (!controller.items_mem) {
807 		free(controller.epts);
808 		return -ENOMEM;
809 	}
810 	memset(controller.items_mem, 0, ilist_sz);
811 
812 	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
813 		/*
814		 * Configure the QH for each endpoint. The QH list is laid out
815		 * so that each pair of consecutive entries, N and N+1 where N
816		 * is even, represents the QHs of one endpoint: the Nth entry
817		 * holds the OUT configuration and the (N+1)th entry holds the
818		 * IN configuration of that endpoint.
819 		 */
820 		head = controller.epts + i;
821 		if (i < 2)
822 			head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
823 				| CONFIG_ZLT | CONFIG_IOS;
824 		else
825 			head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
826 				| CONFIG_ZLT;
827 		head->next = TERMINATE;
828 		head->info = 0;
829 
830 		imem = controller.items_mem + ((i >> 1) * ilist_ent_sz);
831 		if (i & 1)
832 			imem += sizeof(struct ept_queue_item);
833 
834 		controller.items[i] = (struct ept_queue_item *)imem;
835 
836 		if (i & 1) {
837 			ci_flush_qh(i / 2);
838 			ci_flush_qtd(i / 2);
839 		}
840 	}
841 
842 	INIT_LIST_HEAD(&controller.gadget.ep_list);
843 
844 	/* Init EP 0 */
845 	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
846 	controller.ep[0].desc = &ep0_desc;
847 	INIT_LIST_HEAD(&controller.ep[0].queue);
848 	controller.ep[0].req_primed = false;
849 	controller.gadget.ep0 = &controller.ep[0].ep;
850 	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);
851 
852 	/* Init EP 1..n */
853 	for (i = 1; i < NUM_ENDPOINTS; i++) {
854 		memcpy(&controller.ep[i].ep, &ci_ep_init[1],
855 		       sizeof(*ci_ep_init));
856 		INIT_LIST_HEAD(&controller.ep[i].queue);
857 		controller.ep[i].req_primed = false;
858 		list_add_tail(&controller.ep[i].ep.ep_list,
859 			      &controller.gadget.ep_list);
860 	}
861 
862 	ci_ep_alloc_request(&controller.ep[0].ep, 0);
863 	if (!controller.ep0_req) {
864 		free(controller.items_mem);
865 		free(controller.epts);
866 		return -ENOMEM;
867 	}
868 
869 	return 0;
870 }
871 
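/*
 * usb_gadget_register_driver() - bring up the UDC and bind a gadget driver
 *
 * The controller is initialised through usb_lowlevel_init(), the QH/qTD
 * lists are allocated in ci_udc_probe(), and the gadget driver's bind()
 * callback is invoked last. The driver is remembered so that setup and
 * disconnect events can be forwarded to it later. A minimal caller sketch
 * (callback names are illustrative):
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.speed      = USB_SPEED_HIGH,
 *		.bind       = my_bind,
 *		.setup      = my_setup,
 *		.disconnect = my_disconnect,
 *	};
 *
 *	ret = usb_gadget_register_driver(&my_driver);
 */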
872 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
873 {
874 	int ret;
875 
876 	if (!driver)
877 		return -EINVAL;
878 	if (!driver->bind || !driver->setup || !driver->disconnect)
879 		return -EINVAL;
880 	if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
881 		return -EINVAL;
882 
883 	ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
884 	if (ret)
885 		return ret;
886 
887 	ret = ci_udc_probe();
888 #if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS)
889 	/*
890 	 * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all
891 	 * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection
892 	 */
893 	if (!ret) {
894 		struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
895 
896 		/* select ULPI phy */
897 		writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc);
898 	}
899 #endif
900 
901 	ret = driver->bind(&controller.gadget);
902 	if (ret) {
903 		DBG("driver->bind() returned %d\n", ret);
904 		return ret;
905 	}
906 	controller.driver = driver;
907 
908 	return 0;
909 }
910 
911 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
912 {
913 	udc_disconnect();
914 
915 	driver->unbind(&controller.gadget);
916 	controller.driver = NULL;
917 
918 	ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req);
919 	free(controller.items_mem);
920 	free(controller.epts);
921 
922 	return 0;
923 }
924