/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <command.h>
#include <config.h>
#include <net.h>
#include <malloc.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <usb/ci_udc.h>
#include "../host/ehci.h"
#include "ci_udc.h"

/*
 * Check if the system has too long cachelines. If the cachelines are
 * longer than 128b, the driver will not be able to flush/invalidate data
 * cache over separate QH entries. We use 128b because one QH entry is
 * 64b long and there are always two QH list entries for each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver cannot work on systems with cachelines longer than 128b
#endif

/*
 * Each qTD item must be 32-byte aligned, each qTD tuple must be
 * cacheline aligned. There are two qTD items for each endpoint and
 * only one of them is used for the endpoint at a time, so we can group
 * them together.
 */
#define ILIST_ALIGN		roundup(ARCH_DMA_MINALIGN, 32)
#define ILIST_ENT_RAW_SZ	(2 * sizeof(struct ept_queue_item))
#define ILIST_ENT_SZ		roundup(ILIST_ENT_RAW_SZ, ARCH_DMA_MINALIGN)
#define ILIST_SZ		(NUM_ENDPOINTS * ILIST_ENT_SZ)
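
/*
 * Illustrative arithmetic only: assuming 64-byte cachelines
 * (ARCH_DMA_MINALIGN = 64) and a 32-byte struct ept_queue_item, the above
 * works out to ILIST_ALIGN = 64, ILIST_ENT_RAW_SZ = 64 and
 * ILIST_ENT_SZ = 64, i.e. each endpoint's qTD pair exactly fills one
 * cacheline. The actual values depend on the platform.
 */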

#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
static const char *reqname(unsigned r)
{
	switch (r) {
	case USB_REQ_GET_STATUS: return "GET_STATUS";
	case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
	case USB_REQ_SET_FEATURE: return "SET_FEATURE";
	case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
	case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
	case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
	case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
	case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
	case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
	case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
	default: return "*UNKNOWN*";
	}
}
#endif

static struct usb_endpoint_descriptor ep0_desc = {
	.bLength = sizeof(struct usb_endpoint_descriptor),
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes =	USB_ENDPOINT_XFER_CONTROL,
};

static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);

static struct usb_gadget_ops ci_udc_ops = {
	.pullup = ci_pullup,
};

static struct usb_ep_ops ci_ep_ops = {
	.enable         = ci_ep_enable,
	.disable        = ci_ep_disable,
	.queue          = ci_ep_queue,
	.alloc_request  = ci_ep_alloc_request,
	.free_request   = ci_ep_free_request,
};

/* Init values for USB endpoints. */
static const struct usb_ep ci_ep_init[2] = {
	[0] = {	/* EP 0 */
		.maxpacket	= 64,
		.name		= "ep0",
		.ops		= &ci_ep_ops,
	},
	[1] = {	/* EP 1..n */
		.maxpacket	= 512,
		.name		= "ep-",
		.ops		= &ci_ep_ops,
	},
};

static struct ci_drv controller = {
	.gadget	= {
		.name	= "ci_udc",
		.ops	= &ci_udc_ops,
		.is_dualspeed = 1,
	},
};

/**
 * ci_get_qh() - return queue head for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the QH associated with a particular endpoint
 * and its direction.
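 * For example, ep_num = 1 with dir_in = 0 yields epts[2] (EP1 OUT), and
 * dir_in = 1 yields epts[3] (EP1 IN).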
 */
static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
{
	return &controller.epts[(ep_num * 2) + dir_in];
}

/**
 * ci_get_qtd() - return queue item for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the queue item (qTD) associated with a particular
 * endpoint and its direction.
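 * The layout mirrors ci_get_qh(): the OUT and IN items of one endpoint
 * sit next to each other, grouped into a single ILIST_ENT_SZ slot.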
 */
static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
{
	return controller.items[(ep_num * 2) + dir_in];
}

/**
 * ci_flush_qh - flush cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function flushes the cache over the QH pair (OUT and IN) for a
 * particular endpoint.
 */
static void ci_flush_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	const uint32_t start = (uint32_t)head;
	const uint32_t end = start + 2 * sizeof(*head);

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qh - invalidate cache over queue head
 * @ep_num:	Endpoint number
 *
 * This function invalidates the cache over the QH pair (OUT and IN) for a
 * particular endpoint.
 */
static void ci_invalidate_qh(int ep_num)
{
	struct ept_queue_head *head = ci_get_qh(ep_num, 0);
	uint32_t start = (uint32_t)head;
	uint32_t end = start + 2 * sizeof(*head);

	invalidate_dcache_range(start, end);
}

/**
 * ci_flush_qtd - flush cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function flushes the cache over the qTD pair for a particular
 * endpoint.
 */
static void ci_flush_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const uint32_t start = (uint32_t)item;
	const uint32_t end_raw = start + 2 * sizeof(*item);
	const uint32_t end = roundup(end_raw, ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

/**
 * ci_invalidate_qtd - invalidate cache over queue item
 * @ep_num:	Endpoint number
 *
 * This function invalidates the cache over the qTD pair for a particular
 * endpoint.
 */
static void ci_invalidate_qtd(int ep_num)
{
	struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
	const uint32_t start = (uint32_t)item;
	const uint32_t end_raw = start + 2 * sizeof(*item);
	const uint32_t end = roundup(end_raw, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num;
	struct ci_req *ci_req;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
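	/*
	 * EP0 shares a single request: it is allocated on first use and
	 * handed back on subsequent calls, since the control endpoint has
	 * at most one transaction in flight at a time.
	 */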
	if (num == 0 && controller.ep0_req)
		return &controller.ep0_req->req;

	ci_req = memalign(ARCH_DMA_MINALIGN, sizeof(*ci_req));
	if (!ci_req)
		return NULL;

	INIT_LIST_HEAD(&ci_req->queue);
	ci_req->b_buf = NULL;

	if (num == 0)
		controller.ep0_req = ci_req;

	return &ci_req->req;
}

static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int num;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	if (num == 0) {
		if (!controller.ep0_req)
			return;
		controller.ep0_req = NULL;
	}

	if (ci_req->b_buf)
		free(ci_req->b_buf);
	free(ci_req);
}

static void ep_enable(int num, int in, int maxpacket)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n;

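	/*
	 * ENDPTCTRL bits on ChipIdea cores: TXE/RXE enable the endpoint,
	 * TXR/RXR reset the data toggle, and TXT/RXT select the transfer
	 * type (bulk here). See the CTRL_* definitions in ci_udc.h.
	 */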
	n = readl(&udc->epctrl[num]);
	if (in)
		n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
	else
		n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

	if (num != 0) {
		struct ept_queue_head *head = ci_get_qh(num, in);

		head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
		ci_flush_qh(num);
	}
	writel(n, &udc->epctrl[num]);
}

static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	int num, in;
	num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
	ci_ep->desc = desc;

	if (num) {
		int max = get_unaligned_le16(&desc->wMaxPacketSize);

		if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
			max = 64;
		if (ep->maxpacket != max) {
			DBG("%s: from %d to %d\n", __func__,
			    ep->maxpacket, max);
			ep->maxpacket = max;
		}
	}
	ep_enable(num, in, ep->maxpacket);
	DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);
	return 0;
}

static int ci_ep_disable(struct usb_ep *ep)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);

	ci_ep->desc = NULL;
	return 0;
}

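/*
 * ci_bounce() - prepare a request's buffer for DMA
 *
 * If req->buf is not cacheline aligned (in address or length), the data is
 * staged through a cacheline-aligned bounce buffer, since flushing or
 * invalidating a misaligned range could corrupt adjacent data. The chosen
 * buffer is then flushed so the controller sees current memory contents.
 */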
static int ci_bounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	uint32_t addr = (uint32_t)req->buf;
	uint32_t hwaddr;
	uint32_t aligned_used_len;

	/* Input buffer address is not aligned. */
	if (addr & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* Input buffer length is not aligned. */
	if (req->length & (ARCH_DMA_MINALIGN - 1))
		goto align;

	/* The buffer is well aligned, only flush cache. */
	ci_req->hw_len = req->length;
	ci_req->hw_buf = req->buf;
	goto flush;

align:
	if (ci_req->b_buf && req->length > ci_req->b_len) {
		free(ci_req->b_buf);
		ci_req->b_buf = NULL;
	}
	if (!ci_req->b_buf) {
		ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
		ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
		if (!ci_req->b_buf)
			return -ENOMEM;
	}
	ci_req->hw_len = ci_req->b_len;
	ci_req->hw_buf = ci_req->b_buf;

	if (in)
		memcpy(ci_req->hw_buf, req->buf, req->length);

flush:
	hwaddr = (uint32_t)ci_req->hw_buf;
	aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
	flush_dcache_range(hwaddr, hwaddr + aligned_used_len);

	return 0;
}

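/*
 * ci_debounce() - complete a request's buffer after DMA
 *
 * For OUT transfers, invalidate the cache over the region the controller
 * wrote and, if a bounce buffer was used, copy the received data back to
 * the caller's buffer. IN transfers need no post-processing.
 */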
static void ci_debounce(struct ci_req *ci_req, int in)
{
	struct usb_request *req = &ci_req->req;
	uint32_t addr = (uint32_t)req->buf;
	uint32_t hwaddr = (uint32_t)ci_req->hw_buf;
	uint32_t aligned_used_len;

	if (in)
		return;

	aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);

	if (addr == hwaddr)
		return; /* not a bounce */

	memcpy(req->buf, ci_req->hw_buf, req->actual);
}

static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_item *item;
	struct ept_queue_head *head;
	int bit, num, len, in;
	struct ci_req *ci_req;

	ci_ep->req_primed = true;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	head = ci_get_qh(num, in);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	len = ci_req->req.length;

	item->info = INFO_BYTES(len) | INFO_ACTIVE;
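	/*
	 * A qTD addresses at most five 4 KiB buffer pages: page0 points at
	 * the start of the data and page1..page4 at the subsequent page
	 * boundaries, so one qTD can move up to 20 KiB when the buffer is
	 * page aligned (less with a non-zero offset into page0).
	 */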
	item->page0 = (uint32_t)ci_req->hw_buf;
	item->page1 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x1000;
	item->page2 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x2000;
	item->page3 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x3000;
	item->page4 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x4000;

	head->next = (unsigned) item;
	head->info = 0;

	/*
	 * When sending the data for an IN transaction, the attached host
	 * knows that all data for the IN is sent when one of the following
	 * occurs:
	 * a) A zero-length packet is transmitted.
	 * b) A packet with length that isn't an exact multiple of the ep's
	 *    maxpacket is transmitted.
	 * c) Enough data is sent to exactly fill the host's maximum expected
	 *    IN transaction size.
	 *
	 * One of these conditions MUST apply at the end of an IN transaction,
	 * or the transaction will not be considered complete by the host. If
	 * none of (a)..(c) already applies, then we must force (a) to apply
	 * by explicitly sending an extra zero-length packet.
	 */
	/*  IN    !a     !b                              !c */
	if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
		/*
		 * Each endpoint has 2 items allocated, even though typically
		 * only 1 is used at a time since either an IN or an OUT but
		 * not both is queued. For an IN transaction, item currently
		 * points at the second of these items, so we know that we
		 * can use the other to transmit the extra zero-length packet.
		 */
		struct ept_queue_item *other_item = ci_get_qtd(num, 0);
		item->next = (unsigned)other_item;
		item = other_item;
		item->info = INFO_ACTIVE;
	}

	item->next = TERMINATE;
	item->info |= INFO_IOC;

	ci_flush_qtd(num);

	DBG("ept%d %s queue len %x, req %p, buffer %p\n",
	    num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
	ci_flush_qh(num);

	if (in)
		bit = EPT_TX(num);
	else
		bit = EPT_RX(num);

	writel(bit, &udc->epprime);
}

static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int in, ret;
	int __maybe_unused num;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

	if (!num && ci_ep->req_primed) {
		/*
		 * The flipping of ep0 between IN and OUT relies on
		 * ci_ep_queue consuming the current IN/OUT setting
		 * immediately. If this is deferred to a later point when the
		 * req is pulled out of ci_req->queue, then the IN/OUT setting
		 * may have been changed since the req was queued, and state
		 * will get out of sync. This condition doesn't occur today,
		 * but could if bugs were introduced later, and this error
		 * check will save a lot of debugging time.
		 */
		printf("%s: ep0 transaction already in progress\n", __func__);
		return -EPROTO;
	}

	ret = ci_bounce(ci_req, in);
	if (ret)
		return ret;

	DBG("ept%d %s pre-queue req %p, buffer %p\n",
	    num, in ? "in" : "out", ci_req, ci_req->hw_buf);
	list_add_tail(&ci_req->queue, &ci_ep->queue);

	if (!ci_ep->req_primed)
		ci_ep_submit_next_request(ci_ep);

	return 0;
}

static void flip_ep0_direction(void)
{
	if (ep0_desc.bEndpointAddress == USB_DIR_IN) {
		DBG("%s: Flipping ep0 to OUT\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	} else {
		DBG("%s: Flipping ep0 to IN\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	}
}

static void handle_ep_complete(struct ci_ep *ep)
{
	struct ept_queue_item *item;
	int num, in, len;
	struct ci_req *ci_req;

	num = ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	ci_invalidate_qtd(num);

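	/*
	 * qTD info layout: bits 30:16 hold the number of bytes left to
	 * transfer (so "actual" is length minus this), and the low byte
	 * holds the status bits; any of them set indicates an error.
	 */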
	len = (item->info >> 16) & 0x7fff;
	if (item->info & 0xff)
		printf("EP%d/%s FAIL info=%x pg0=%x\n",
		       num, in ? "in" : "out", item->info, item->page0);

	ci_req = list_first_entry(&ep->queue, struct ci_req, queue);
	list_del_init(&ci_req->queue);
	ep->req_primed = false;

	if (!list_empty(&ep->queue))
		ci_ep_submit_next_request(ep);

	ci_req->req.actual = ci_req->req.length - len;
	ci_debounce(ci_req, in);

	DBG("ept%d %s req %p, complete %x\n",
	    num, in ? "in" : "out", ci_req, len);
	if (num != 0 || controller.ep0_data_phase)
		ci_req->req.complete(&ep->ep, &ci_req->req);
	if (num == 0 && controller.ep0_data_phase) {
		/*
		 * Data Stage is complete, so flip ep0 dir for Status Stage,
		 * which always transfers a packet in the opposite direction.
		 */
		DBG("%s: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
		ci_req->req.length = 0;
		usb_ep_queue(&ep->ep, &ci_req->req, 0);
	}
}

#define SETUP(type, request) (((type) << 8) | (request))

static void handle_setup(void)
{
	struct ci_ep *ci_ep = &controller.ep[0];
	struct ci_req *ci_req;
	struct usb_request *req;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_head *head;
	struct usb_ctrlrequest r;
	int status = 0;
	int num, in, _num, _in, i;
	char *buf;

	ci_req = controller.ep0_req;
	req = &ci_req->req;
	head = ci_get_qh(0, 0);	/* EP0 OUT */

	ci_invalidate_qh(0);
	memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(EPT_RX(0), &udc->epsetupstat);
#else
	writel(EPT_RX(0), &udc->epstat);
#endif
	DBG("handle setup %s, %x, %x index %x value %x length %x\n",
	    reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
	    r.wValue, r.wLength);

	/* Set EP0 dir for Data Stage based on Setup Stage data */
	if (r.bRequestType & USB_DIR_IN) {
		DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	} else {
		DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	}
	if (r.wLength) {
		controller.ep0_data_phase = true;
	} else {
		/* 0 length -> no Data Stage. Flip dir for Status Stage */
		DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
	}

	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	switch (SETUP(r.bRequestType, r.bRequest)) {
	case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
		_num = r.wIndex & 15;
		_in = !!(r.wIndex & 0x80);

		if ((r.wValue == 0) && (r.wLength == 0)) {
			req->length = 0;
			for (i = 0; i < NUM_ENDPOINTS; i++) {
				struct ci_ep *ep = &controller.ep[i];

				if (!ep->desc)
					continue;
				num = ep->desc->bEndpointAddress
						& USB_ENDPOINT_NUMBER_MASK;
				in = (ep->desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				if ((num == _num) && (in == _in)) {
					ep_enable(num, in, ep->ep.maxpacket);
					usb_ep_queue(controller.gadget.ep0,
							req, 0);
					break;
				}
			}
		}
		return;

	case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
		/*
		 * write address delayed (will take effect
		 * after the next IN txn)
		 */
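		/*
		 * DEVICEADDR layout: the address lives in bits 31:25; bit 24
		 * (USBADRA on ChipIdea cores) tells the hardware to apply it
		 * only after the Status Stage IN completes, as the USB spec
		 * requires.
		 */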
		writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
		req->length = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;

	case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
		req->length = 2;
		buf = (char *)req->buf;
		buf[0] = 1 << USB_DEVICE_SELF_POWERED;
		buf[1] = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;
	}
	/* pass request up to the gadget driver */
	if (controller.driver)
		status = controller.driver->setup(&controller.gadget, &r);
	else
		status = -ENODEV;

	if (!status)
		return;
	DBG("STALL reqname %s type %x value %x, index %x\n",
	    reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
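	/* Stall ep0 in both directions: bit 16 is TXS, bit 0 is RXS. */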
	writel((1<<16) | (1 << 0), &udc->epctrl[0]);
}

static void stop_activity(void)
{
	int i, num, in;
	struct ept_queue_head *head;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
	writel(readl(&udc->epstat), &udc->epstat);
	writel(0xffffffff, &udc->epflush);

	/* error out any pending reqs */
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		if (i != 0)
			writel(0, &udc->epctrl[i]);
		if (controller.ep[i].desc) {
			num = controller.ep[i].desc->bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK;
			in = (controller.ep[i].desc->bEndpointAddress
				& USB_DIR_IN) != 0;
			head = ci_get_qh(num, in);
			head->info = INFO_ACTIVE;
			ci_flush_qh(num);
		}
	}
}

void udc_irq(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n = readl(&udc->usbsts);
	writel(n, &udc->usbsts);
	int bit, i, num, in;

	n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
	if (n == 0)
		return;

	if (n & STS_URI) {
		DBG("-- reset --\n");
		stop_activity();
	}
	if (n & STS_SLI)
		DBG("-- suspend --\n");

	if (n & STS_PCI) {
		int max = 64;
		int speed = USB_SPEED_FULL;

#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
#else
		bit = (readl(&udc->portsc) >> 26) & 3;
#endif
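		/* Port speed field: 0 = full speed, 2 = high speed. */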
		DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
		if (bit == 2) {
			speed = USB_SPEED_HIGH;
			max = 512;
		}
		controller.gadget.speed = speed;
		for (i = 1; i < NUM_ENDPOINTS; i++) {
			if (controller.ep[i].ep.maxpacket > max)
				controller.ep[i].ep.maxpacket = max;
		}
	}

	if (n & STS_UEI)
		printf("<UEI %x>\n", readl(&udc->epcomp));

	if ((n & STS_UI) || (n & STS_UEI)) {
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		n = readl(&udc->epsetupstat);
#else
		n = readl(&udc->epstat);
#endif
		if (n & EPT_RX(0))
			handle_setup();

		n = readl(&udc->epcomp);
		if (n != 0)
			writel(n, &udc->epcomp);

		for (i = 0; i < NUM_ENDPOINTS && n; i++) {
			if (controller.ep[i].desc) {
				num = controller.ep[i].desc->bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK;
				in = (controller.ep[i].desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				bit = (in) ? EPT_TX(num) : EPT_RX(num);
				if (n & bit)
					handle_ep_complete(&controller.ep[i]);
			}
		}
	}
}

int usb_gadget_handle_interrupts(void)
{
	u32 value;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

	value = readl(&udc->usbsts);
	if (value)
		udc_irq();

	return value;
}

void udc_disconnect(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	/* disable pullup */
	stop_activity();
	writel(USBCMD_FS2, &udc->usbcmd);
	udelay(800);
	if (controller.driver)
		controller.driver->disconnect(&controller.gadget);
}

static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	if (is_on) {
		/* RESET */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
		udelay(200);

		writel((unsigned)controller.epts, &udc->epinitaddr);

		/* select DEVICE mode */
		writel(USBMODE_DEVICE, &udc->usbmode);

		writel(0xffffffff, &udc->epflush);

		/* Turn on the USB connection by enabling the pullup resistor */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd);
	} else {
		udc_disconnect();
	}

	return 0;
}

static int ci_udc_probe(void)
{
	struct ept_queue_head *head;
	uint8_t *imem;
	int i;

	const int num = 2 * NUM_ENDPOINTS;

	const int eplist_min_align = 4096;
	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

	/* The QH list must be aligned to 4096 bytes. */
	controller.epts = memalign(eplist_align, eplist_sz);
	if (!controller.epts)
		return -ENOMEM;
	memset(controller.epts, 0, eplist_sz);

	controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
	if (!controller.items_mem) {
		free(controller.epts);
		return -ENOMEM;
	}
	memset(controller.items_mem, 0, ILIST_SZ);

	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
		/*
		 * Configure the QH for each endpoint. The QH list is laid
		 * out as consecutive pairs: for each endpoint, entry N
		 * (N even) holds the OUT configuration and entry N + 1
		 * holds the IN configuration.
		 */
		head = controller.epts + i;
		if (i < 2)
			head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
				| CONFIG_ZLT | CONFIG_IOS;
		else
			head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
				| CONFIG_ZLT;
		head->next = TERMINATE;
		head->info = 0;

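		/*
		 * The qTDs follow the same pairing: each endpoint owns one
		 * ILIST_ENT_SZ slot in items_mem, with the OUT item first
		 * and the IN item immediately after it.
		 */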
		imem = controller.items_mem + ((i >> 1) * ILIST_ENT_SZ);
		if (i & 1)
			imem += sizeof(struct ept_queue_item);

		controller.items[i] = (struct ept_queue_item *)imem;

		if (i & 1) {
			ci_flush_qh(i / 2);
			ci_flush_qtd(i / 2);
		}
	}

	INIT_LIST_HEAD(&controller.gadget.ep_list);

	/* Init EP 0 */
	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
	controller.ep[0].desc = &ep0_desc;
	INIT_LIST_HEAD(&controller.ep[0].queue);
	controller.ep[0].req_primed = false;
	controller.gadget.ep0 = &controller.ep[0].ep;
	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

	/* Init EP 1..n */
	for (i = 1; i < NUM_ENDPOINTS; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[1],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	ci_ep_alloc_request(&controller.ep[0].ep, 0);
	if (!controller.ep0_req) {
		free(controller.items_mem);
		free(controller.epts);
		return -ENOMEM;
	}

	return 0;
}

int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int ret;

	if (!driver)
		return -EINVAL;
	if (!driver->bind || !driver->setup || !driver->disconnect)
		return -EINVAL;
	if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
		return -EINVAL;

	ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
	if (ret)
		return ret;

	ret = ci_udc_probe();
#if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS)
	/*
	 * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all
	 * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection
	 */
	if (!ret) {
		struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;

		/* select ULPI phy */
		writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc);
	}
#endif
	/* Do not bind the driver if controller setup failed. */
	if (ret)
		return ret;

	ret = driver->bind(&controller.gadget);
	if (ret) {
		DBG("driver->bind() returned %d\n", ret);
		return ret;
	}
	controller.driver = driver;

	return 0;
}

int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	udc_disconnect();

	driver->unbind(&controller.gadget);
	controller.driver = NULL;

	ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req);
	free(controller.items_mem);
	free(controller.epts);

	return 0;
}
925