xref: /rk3399_rockchip-uboot/drivers/usb/gadget/ci_udc.c (revision 6ac15fda4e2b9ad45b7769037964110f7f597b5c)
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <command.h>
#include <config.h>
#include <net.h>
#include <malloc.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb/ci_udc.h>
#include "../host/ehci.h"
#include "ci_udc.h"

/*
 * Check that the system's cachelines are not too long. If a cacheline is
 * longer than 128 bytes, the driver will not be able to flush/invalidate
 * the data cache over separate QH entries. We use 128 bytes because one QH
 * entry is 64 bytes long and there are always two QH list entries for each
 * endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver cannot work on systems with cachelines longer than 128 bytes
#endif

/*
 * Every QTD must be individually aligned, since we can program any
 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN,
 * and the USB HW requires 32-byte alignment. Align to both:
 */
#define ILIST_ALIGN		roundup(ARCH_DMA_MINALIGN, 32)
/* Each QTD is this size */
#define ILIST_ENT_RAW_SZ	sizeof(struct ept_queue_item)
/*
 * Align the size of the QTD too, so we can add this value to each
 * QTD's address to get another aligned address.
 */
#define ILIST_ENT_SZ		roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN)
/* For each endpoint, we need 2 QTDs, one for each of IN and OUT */
#define ILIST_SZ		(NUM_ENDPOINTS * 2 * ILIST_ENT_SZ)

#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
static const char *reqname(unsigned r)
{
	switch (r) {
	case USB_REQ_GET_STATUS: return "GET_STATUS";
	case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
	case USB_REQ_SET_FEATURE: return "SET_FEATURE";
	case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
	case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
	case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
	case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
	case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
	case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
	case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
	default: return "*UNKNOWN*";
	}
}
#endif

static struct usb_endpoint_descriptor ep0_desc = {
	.bLength = sizeof(struct usb_endpoint_descriptor),
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes =	USB_ENDPOINT_XFER_CONTROL,
};

static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);

static struct usb_gadget_ops ci_udc_ops = {
	.pullup = ci_pullup,
};

static struct usb_ep_ops ci_ep_ops = {
	.enable         = ci_ep_enable,
	.disable        = ci_ep_disable,
	.queue          = ci_ep_queue,
	.alloc_request  = ci_ep_alloc_request,
	.free_request   = ci_ep_free_request,
};

/* Init values for USB endpoints. */
static const struct usb_ep ci_ep_init[2] = {
	[0] = {	/* EP 0 */
		.maxpacket	= 64,
		.name		= "ep0",
		.ops		= &ci_ep_ops,
	},
	[1] = {	/* EP 1..n */
		.maxpacket	= 512,
		.name		= "ep-",
		.ops		= &ci_ep_ops,
	},
};

static struct ci_drv controller = {
	.gadget	= {
		.name	= "ci_udc",
		.ops	= &ci_udc_ops,
		.is_dualspeed = 1,
	},
};

/**
 * ci_get_qh() - return queue head for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the QH associated with a particular endpoint
 * and its direction.
 */
static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
{
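	/* QHs come in pairs: index 2 * ep_num is OUT, 2 * ep_num + 1 is IN. */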
	return &controller.epts[(ep_num * 2) + dir_in];
}

/**
 * ci_get_qtd() - return queue item for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the queue item (qTD) associated with a particular
 * endpoint and its direction.
 */
static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
{
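	/* qTDs are packed one per ILIST_ENT_SZ slot, paired OUT/IN like the QHs. */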
	int index = (ep_num * 2) + dir_in;
	uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ);
	return (struct ept_queue_item *)imem;
}

/**
 * ci_flush_qh - flush cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function flushes the cache over the QH pair for a particular endpoint.
 */
static void ci_flush_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	const uint32_t start = (uint32_t)head;
	const uint32_t end = start + 2 * sizeof(*head);

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qh - invalidate cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function invalidates the cache over the QH pair for a particular
 * endpoint.
 */
static void ci_invalidate_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	uint32_t start = (uint32_t)head;
	uint32_t end = start + 2 * sizeof(*head);

	invalidate_dcache_range(start, end);
}

/**
 * ci_flush_qtd - flush cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function flushes the cache over the qTD pair for a particular endpoint.
 */
static void ci_flush_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const uint32_t start = (uint32_t)item;
	const uint32_t end = start + 2 * ILIST_ENT_SZ;

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qtd - invalidate cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function invalidates the cache over the qTD pair for a particular
 * endpoint.
 */
static void ci_invalidate_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const uint32_t start = (uint32_t)item;
	const uint32_t end = start + 2 * ILIST_ENT_SZ;

	invalidate_dcache_range(start, end);
}

static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num;
	struct ci_req *ci_req;

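	/* ep0 re-uses a single request object, shared with handle_setup(). */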
	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	if (num == 0 && controller.ep0_req)
		return &controller.ep0_req->req;

	ci_req = memalign(ARCH_DMA_MINALIGN, sizeof(*ci_req));
	if (!ci_req)
		return NULL;

	INIT_LIST_HEAD(&ci_req->queue);
	ci_req->b_buf = 0;

	if (num == 0)
		controller.ep0_req = ci_req;

	return &ci_req->req;
}

static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int num;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	if (num == 0) {
		if (!controller.ep0_req)
			return;
		controller.ep0_req = 0;
	}

	if (ci_req->b_buf)
		free(ci_req->b_buf);
	free(ci_req);
}

static void ep_enable(int num, int in, int maxpacket)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n;

	n = readl(&udc->epctrl[num]);
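	/* Enable the endpoint, reset its data toggle and select the bulk type. */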
	if (in)
		n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
	else
		n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

	if (num != 0) {
		struct ept_queue_head *head = ci_get_qh(num, in);

		head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
		ci_flush_qh(num);
	}
	writel(n, &udc->epctrl[num]);
}

static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num, in;
	num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
	ci_ep->desc = desc;

	if (num) {
		int max = get_unaligned_le16(&desc->wMaxPacketSize);

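		/* Full speed allows at most 64-byte packets, so clamp max. */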
		if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
			max = 64;
		if (ep->maxpacket != max) {
			DBG("%s: from %d to %d\n", __func__,
			    ep->maxpacket, max);
			ep->maxpacket = max;
		}
	}
	ep_enable(num, in, ep->maxpacket);
	DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);
	return 0;
}

static int ci_ep_disable(struct usb_ep *ep)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);

	ci_ep->desc = NULL;
	return 0;
}

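/*
 * Prepare a request's buffer for DMA: buffers that are not cacheline
 * aligned are copied (bounced) into an aligned scratch buffer, and the
 * buffer handed to the hardware is flushed from the data cache.
 */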
static int ci_bounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	uint32_t addr = (uint32_t)req->buf;
	uint32_t hwaddr;
	uint32_t aligned_used_len;

	/* Input buffer address is not aligned. */
	if (addr & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* Input buffer length is not aligned. */
	if (req->length & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* The buffer is well aligned, only flush cache. */
	ci_req->hw_len = req->length;
	ci_req->hw_buf = req->buf;
	goto flush;

align:
	if (ci_req->b_buf && req->length > ci_req->b_len) {
		free(ci_req->b_buf);
		ci_req->b_buf = 0;
	}
	if (!ci_req->b_buf) {
		ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
		ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
		if (!ci_req->b_buf)
			return -ENOMEM;
	}
	ci_req->hw_len = ci_req->b_len;
	ci_req->hw_buf = ci_req->b_buf;

	if (in)
		memcpy(ci_req->hw_buf, req->buf, req->length);

flush:
	hwaddr = (uint32_t)ci_req->hw_buf;
	aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
	flush_dcache_range(hwaddr, hwaddr + aligned_used_len);

	return 0;
}

static void ci_debounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	uint32_t addr = (uint32_t)req->buf;
	uint32_t hwaddr = (uint32_t)ci_req->hw_buf;
	uint32_t aligned_used_len;

	if (in)
		return;

	aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);

	if (addr == hwaddr)
		return; /* not a bounce */

	memcpy(req->buf, ci_req->hw_buf, req->actual);
}

static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_item *item;
	struct ept_queue_head *head;
	int bit, num, len, in;
	struct ci_req *ci_req;

	ci_ep->req_primed = true;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	head = ci_get_qh(num, in);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	len = ci_req->req.length;

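	/*
	 * Fill in the five buffer page pointers; pages 1-4 continue the
	 * buffer at the following 4 KiB boundaries after page0.
	 */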
	item->info = INFO_BYTES(len) | INFO_ACTIVE;
	item->page0 = (uint32_t)ci_req->hw_buf;
	item->page1 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x1000;
	item->page2 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x2000;
	item->page3 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x3000;
	item->page4 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x4000;

	head->next = (unsigned) item;
	head->info = 0;

	/*
	 * When sending the data for an IN transaction, the attached host
	 * knows that all data for the IN is sent when one of the following
	 * occurs:
	 * a) A zero-length packet is transmitted.
	 * b) A packet with length that isn't an exact multiple of the ep's
	 *    maxpacket is transmitted.
	 * c) Enough data is sent to exactly fill the host's maximum expected
	 *    IN transaction size.
	 *
	 * One of these conditions MUST apply at the end of an IN transaction,
	 * or the transaction will not be considered complete by the host. If
	 * none of (a)..(c) already applies, then we must force (a) to apply
	 * by explicitly sending an extra zero-length packet.
	 */
	/*  IN    !a     !b                              !c */
	if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
		/*
		 * Each endpoint has 2 items allocated, even though typically
		 * only 1 is used at a time since either an IN or an OUT but
		 * not both is queued. For an IN transaction, item currently
		 * points at the second of these items, so we know that we
		 * can use the other to transmit the extra zero-length packet.
		 */
		struct ept_queue_item *other_item = ci_get_qtd(num, 0);
		item->next = (unsigned)other_item;
		item = other_item;
		item->info = INFO_ACTIVE;
	}

	item->next = TERMINATE;
	item->info |= INFO_IOC;

	ci_flush_qtd(num);

	DBG("ept%d %s queue len %x, req %p, buffer %p\n",
	    num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
	ci_flush_qh(num);

	if (in)
		bit = EPT_TX(num);
	else
		bit = EPT_RX(num);

	writel(bit, &udc->epprime);
}

static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int in, ret;
	int __maybe_unused num;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

	if (!num && ci_ep->req_primed) {
		/*
		 * The flipping of ep0 between IN and OUT relies on
		 * ci_ep_queue consuming the current IN/OUT setting
		 * immediately. If this is deferred to a later point when the
		 * req is pulled out of ci_req->queue, then the IN/OUT setting
		 * may have been changed since the req was queued, and state
		 * will get out of sync. This condition doesn't occur today,
		 * but could if bugs were introduced later, and this error
		 * check will save a lot of debugging time.
		 */
		printf("%s: ep0 transaction already in progress\n", __func__);
		return -EPROTO;
	}

	ret = ci_bounce(ci_req, in);
	if (ret)
		return ret;

	DBG("ept%d %s pre-queue req %p, buffer %p\n",
	    num, in ? "in" : "out", ci_req, ci_req->hw_buf);
	list_add_tail(&ci_req->queue, &ci_ep->queue);

	if (!ci_ep->req_primed)
		ci_ep_submit_next_request(ci_ep);

	return 0;
}

static void flip_ep0_direction(void)
{
	if (ep0_desc.bEndpointAddress == USB_DIR_IN) {
		DBG("%s: Flipping ep0 to OUT\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	} else {
		DBG("%s: Flipping ep0 to IN\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	}
}

static void handle_ep_complete(struct ci_ep *ep)
{
	struct ept_queue_item *item;
	int num, in, len;
	struct ci_req *ci_req;

	num = ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	ci_invalidate_qtd(num);

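	/*
	 * Bits 30:16 of 'info' count the bytes left untransferred; the low
	 * byte carries the qTD status/error flags.
	 */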
	len = (item->info >> 16) & 0x7fff;
	if (item->info & 0xff)
		printf("EP%d/%s FAIL info=%x pg0=%x\n",
		       num, in ? "in" : "out", item->info, item->page0);

	ci_req = list_first_entry(&ep->queue, struct ci_req, queue);
	list_del_init(&ci_req->queue);
	ep->req_primed = false;

	if (!list_empty(&ep->queue))
		ci_ep_submit_next_request(ep);

	ci_req->req.actual = ci_req->req.length - len;
	ci_debounce(ci_req, in);

	DBG("ept%d %s req %p, complete %x\n",
	    num, in ? "in" : "out", ci_req, len);
	if (num != 0 || controller.ep0_data_phase)
		ci_req->req.complete(&ep->ep, &ci_req->req);
	if (num == 0 && controller.ep0_data_phase) {
		/*
		 * Data Stage is complete, so flip ep0 dir for Status Stage,
		 * which always transfers a packet in the opposite direction.
		 */
		DBG("%s: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
		ci_req->req.length = 0;
		usb_ep_queue(&ep->ep, &ci_req->req, 0);
	}
}

#define SETUP(type, request) (((type) << 8) | (request))

static void handle_setup(void)
{
	struct ci_ep *ci_ep = &controller.ep[0];
	struct ci_req *ci_req;
	struct usb_request *req;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_head *head;
	struct usb_ctrlrequest r;
	int status = 0;
	int num, in, _num, _in, i;
	char *buf;

	ci_req = controller.ep0_req;
	req = &ci_req->req;
	head = ci_get_qh(0, 0);	/* EP0 OUT */

	ci_invalidate_qh(0);
	memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(EPT_RX(0), &udc->epsetupstat);
#else
	writel(EPT_RX(0), &udc->epstat);
#endif
	DBG("handle setup %s, %x, %x index %x value %x length %x\n",
	    reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
	    r.wValue, r.wLength);

	/* Set EP0 dir for Data Stage based on Setup Stage data */
	if (r.bRequestType & USB_DIR_IN) {
		DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	} else {
		DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	}
	if (r.wLength) {
		controller.ep0_data_phase = true;
	} else {
		/* 0 length -> no Data Stage. Flip dir for Status Stage */
		DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
	}

	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	switch (SETUP(r.bRequestType, r.bRequest)) {
	case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
		_num = r.wIndex & 15;
		_in = !!(r.wIndex & 0x80);

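		/*
		 * CLEAR_FEATURE on an endpoint: re-enable the matching
		 * endpoint (resetting its data toggle) and finish with a
		 * zero-length status packet.
		 */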
		if ((r.wValue == 0) && (r.wLength == 0)) {
			req->length = 0;
			for (i = 0; i < NUM_ENDPOINTS; i++) {
				struct ci_ep *ep = &controller.ep[i];

				if (!ep->desc)
					continue;
				num = ep->desc->bEndpointAddress
						& USB_ENDPOINT_NUMBER_MASK;
				in = (ep->desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				if ((num == _num) && (in == _in)) {
					ep_enable(num, in, ep->ep.maxpacket);
					usb_ep_queue(controller.gadget.ep0,
							req, 0);
					break;
				}
			}
		}
		return;

	case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
		/*
		 * write address delayed (will take effect
		 * after the next IN txn)
		 */
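		/* The address sits in bits 31:25; bit 24 defers the update. */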
		writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
		req->length = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;

	case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
		req->length = 2;
		buf = (char *)req->buf;
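		/* Bit 0 of the status word reports the device as self-powered. */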
		buf[0] = 1 << USB_DEVICE_SELF_POWERED;
		buf[1] = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;
	}
	/* pass request up to the gadget driver */
	if (controller.driver)
		status = controller.driver->setup(&controller.gadget, &r);
	else
		status = -ENODEV;

	if (!status)
		return;
	DBG("STALL reqname %s type %x value %x, index %x\n",
	    reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
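	/* Protocol stall: stall ep0 in both directions to reject the request. */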
	writel((1<<16) | (1 << 0), &udc->epctrl[0]);
}

static void stop_activity(void)
{
	int i, num, in;
	struct ept_queue_head *head;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
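	/* Acknowledge all pending events, then flush (de-prime) every endpoint. */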
	writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
	writel(readl(&udc->epstat), &udc->epstat);
	writel(0xffffffff, &udc->epflush);

	/* error out any pending reqs */
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		if (i != 0)
			writel(0, &udc->epctrl[i]);
		if (controller.ep[i].desc) {
			num = controller.ep[i].desc->bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK;
			in = (controller.ep[i].desc->bEndpointAddress
				& USB_DIR_IN) != 0;
			head = ci_get_qh(num, in);
			head->info = INFO_ACTIVE;
			ci_flush_qh(num);
		}
	}
}

void udc_irq(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n = readl(&udc->usbsts);
	writel(n, &udc->usbsts);
	int bit, i, num, in;

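	/* Only the reset, suspend, port-change, transfer and error bits matter here. */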
	n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
	if (n == 0)
		return;

	if (n & STS_URI) {
		DBG("-- reset --\n");
		stop_activity();
	}
	if (n & STS_SLI)
		DBG("-- suspend --\n");

	if (n & STS_PCI) {
		int max = 64;
		int speed = USB_SPEED_FULL;

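		/* Port speed field: 2 means high speed, anything else is treated as full speed. */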
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
#else
		bit = (readl(&udc->portsc) >> 26) & 3;
#endif
		DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
		if (bit == 2) {
			speed = USB_SPEED_HIGH;
			max = 512;
		}
		controller.gadget.speed = speed;
		for (i = 1; i < NUM_ENDPOINTS; i++) {
			if (controller.ep[i].ep.maxpacket > max)
				controller.ep[i].ep.maxpacket = max;
		}
	}

	if (n & STS_UEI)
		printf("<UEI %x>\n", readl(&udc->epcomp));

	if ((n & STS_UI) || (n & STS_UEI)) {
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		n = readl(&udc->epsetupstat);
#else
		n = readl(&udc->epstat);
#endif
		if (n & EPT_RX(0))
			handle_setup();

		n = readl(&udc->epcomp);
		if (n != 0)
			writel(n, &udc->epcomp);

		for (i = 0; i < NUM_ENDPOINTS && n; i++) {
			if (controller.ep[i].desc) {
				num = controller.ep[i].desc->bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK;
				in = (controller.ep[i].desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				bit = (in) ? EPT_TX(num) : EPT_RX(num);
				if (n & bit)
					handle_ep_complete(&controller.ep[i]);
			}
		}
	}
}

int usb_gadget_handle_interrupts(void)
{
	u32 value;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	value = readl(&udc->usbsts);
	if (value)
		udc_irq();

	return value;
}

void udc_disconnect(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	/* disable pullup */
	stop_activity();
	writel(USBCMD_FS2, &udc->usbcmd);
	udelay(800);
	if (controller.driver)
		controller.driver->disconnect(&controller.gadget);
}

static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	if (is_on) {
		/* RESET */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
		udelay(200);

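		/* Point the controller at the QH list. */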
		writel((unsigned)controller.epts, &udc->epinitaddr);

		/* select DEVICE mode */
		writel(USBMODE_DEVICE, &udc->usbmode);

		writel(0xffffffff, &udc->epflush);

		/* Turn on the USB connection by enabling the pullup resistor */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd);
	} else {
		udc_disconnect();
	}

	return 0;
}

static int ci_udc_probe(void)
{
	struct ept_queue_head *head;
	int i;

	const int num = 2 * NUM_ENDPOINTS;

	const int eplist_min_align = 4096;
	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

	/* The QH list must be aligned to 4096 bytes. */
	controller.epts = memalign(eplist_align, eplist_sz);
	if (!controller.epts)
		return -ENOMEM;
	memset(controller.epts, 0, eplist_sz);

	controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
	if (!controller.items_mem) {
		free(controller.epts);
		return -ENOMEM;
	}
	memset(controller.items_mem, 0, ILIST_SZ);

	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
		/*
		 * Configure the QH for each endpoint. QHs are laid out in
		 * consecutive pairs: for even N, entries N and N+1 belong to
		 * one endpoint, with entry N holding the OUT configuration
		 * and entry N+1 the IN configuration.
		 */
		head = controller.epts + i;
		if (i < 2)
			head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
				| CONFIG_ZLT | CONFIG_IOS;
		else
			head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
				| CONFIG_ZLT;
		head->next = TERMINATE;
		head->info = 0;

		if (i & 1) {
			ci_flush_qh(i / 2);
			ci_flush_qtd(i / 2);
		}
	}

	INIT_LIST_HEAD(&controller.gadget.ep_list);

	/* Init EP 0 */
	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
	controller.ep[0].desc = &ep0_desc;
	INIT_LIST_HEAD(&controller.ep[0].queue);
	controller.ep[0].req_primed = false;
	controller.gadget.ep0 = &controller.ep[0].ep;
	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

	/* Init EP 1..n */
	for (i = 1; i < NUM_ENDPOINTS; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[1],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

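	/* Pre-allocate the shared ep0 request; handle_setup() relies on it. */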
	ci_ep_alloc_request(&controller.ep[0].ep, 0);
	if (!controller.ep0_req) {
		free(controller.items_mem);
		free(controller.epts);
		return -ENOMEM;
	}

	return 0;
}

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int ret;

	if (!driver)
		return -EINVAL;
	if (!driver->bind || !driver->setup || !driver->disconnect)
		return -EINVAL;
	if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
		return -EINVAL;

	ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
	if (ret)
		return ret;

	ret = ci_udc_probe();
#if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS)
	/*
	 * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all
	 * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection
	 */
	if (!ret) {
		struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

		/* select ULPI phy */
		writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc);
	}
#endif

	ret = driver->bind(&controller.gadget);
	if (ret) {
		DBG("driver->bind() returned %d\n", ret);
		return ret;
	}
	controller.driver = driver;

	return 0;
}

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	udc_disconnect();

	driver->unbind(&controller.gadget);
	controller.driver = NULL;

	ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req);
	free(controller.items_mem);
	free(controller.epts);

	return 0;
}