// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * epn.c - Generic endpoints management
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

#define EXTRA_CHECKS

#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...)					\
	do {							\
		if (!(expr)) EPDBG(ep, "CHECK:" fmt);		\
	} while (0)
#else
#define CHECK(ep, expr, fmt...)	do { } while (0)
#endif

static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
	unsigned int act = req->req.actual;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* There should be no DMA ongoing */
	WARN_ON(req->active);

	/* Calculate next chunk size */
	chunk = len - act;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;
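	/*
	 * Note: when the remaining chunk is exactly maxpacket and req.zero
	 * is set, last_desc is left unset here; the next kick then sees
	 * chunk == 0 (which is < maxpacket) and queues a zero-length packet
	 * to close the transfer.
	 */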

	EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
	       req, act, len, chunk, req->last_desc);

	/* If DMA is unavailable, use the staging EP buffer */
	if (!req->req.dma) {

		/* For IN transfers, copy data over first */
		if (ep->epn.is_in) {
			memcpy(ep->buf, req->req.buf + act, chunk);
			vhub_dma_workaround(ep->buf);
		}
		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	} else {
		if (ep->epn.is_in)
			vhub_dma_workaround(req->req.buf);
		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	}

	/* Start DMA */
	req->active = true;
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}

static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len;
	u32 stat;

	/* Read EP status */
	stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
	       stat, ep->epn.is_in, req, req ? req->active : 0);

	/* In the absence of a request, bail out; it must have been dequeued */
	if (!req)
		return;

	/*
	 * Request not active, move on to processing queue, active request
	 * was probably dequeued
	 */
	if (!req->active)
		goto next_chunk;

	/* Check if HW has moved on */
	if (VHUB_EP_DMA_RPTR(stat) != 0) {
		EPDBG(ep, "DMA read pointer not 0 !\n");
		return;
	}

	/* No current DMA ongoing */
	req->active = false;

	/* Grab length out of HW */
	len = VHUB_EP_DMA_TX_SIZE(stat);

	/* If not using DMA, copy data out if needed */
	if (!req->req.dma && !ep->epn.is_in && len)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);

	/* Adjust size */
	req->req.actual += len;

	/* Check for short packet */
	if (len < ep->ep.maxpacket)
		req->last_desc = 1;

	/* That's it ? complete the request and pick a new one */
	if (req->last_desc >= 0) {
		ast_vhub_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
					       queue);

		/*
		 * Due to lock dropping inside "done" the next request could
		 * already be active, so check for that and bail if needed.
		 */
		if (!req || req->active)
			return;
	}

 next_chunk:
	ast_vhub_epn_kick(ep, req);
}

static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
	/*
	 * d_next == d_last means descriptor list empty to HW,
	 * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
	 * in the list
	 */
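	/*
	 * Worked example with the 256-entry ring this driver uses:
	 * d_last = 10, d_next = 12 -> (10 + 256 - 12 - 1) & 255 = 253
	 * free slots, i.e. two descriptors are in flight and one slot is
	 * kept back so d_next never catches up with d_last.
	 */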
	return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
		(AST_VHUB_DESCS_COUNT - 1);
}

static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
				   struct ast_vhub_req *req)
{
	struct ast_vhub_desc *desc = NULL;
	unsigned int act = req->act_count;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* Mark request active if not already */
	req->active = true;

	/* If the request was already completely written, do nothing */
	if (req->last_desc >= 0)
		return;

	EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
	       act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));

	/* While we can create descriptors */
	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
		unsigned int d_num;

		/* Grab next free descriptor */
		d_num = ep->epn.d_next;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Calculate next chunk size */
		chunk = len - act;
		if (chunk <= ep->epn.chunk_max) {
			/*
			 * Is this the last packet ? Because of having up to 8
			 * packets in a descriptor we can't just compare "chunk"
			 * with ep.maxpacket. We have to see if it's a multiple
			 * of it to know if we have to send a zero packet.
			 * Sadly that involves a modulo which is a bit expensive
			 * but probably still better than not doing it.
			 */
			if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
				req->last_desc = d_num;
		} else {
			chunk = ep->epn.chunk_max;
		}
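
		/*
		 * Example: maxpacket = 512, req.zero set, 1024-byte request.
		 * The first iteration queues a single 1024-byte chunk; since
		 * 1024 is a multiple of 512, last_desc stays unset. The next
		 * iteration then queues a zero-length descriptor (chunk == 0)
		 * and marks it as the last one.
		 */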

		EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
		       act, len, chunk, req->last_desc, d_num,
		       ast_vhub_count_free_descs(ep));

		/* Populate descriptor */
		desc->w0 = cpu_to_le32(req->req.dma + act);

		/* Interrupt if end of request or no more descriptors */

		/*
		 * TODO: Be smarter about it, if we don't have enough
		 * descriptors request an interrupt before queue empty
		 * or so in order to be able to populate more before
		 * the HW runs out. This isn't a problem at the moment
		 * as we use 256 descriptors and only put at most one
		 * request in the ring.
		 */
		desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
			desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);

		/* Account packet */
		req->act_count = act = act + chunk;
	}

	if (likely(desc))
		vhub_dma_workaround(desc);

	/* Tell HW about new descriptors */
	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
	       ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}

static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len, d_last;
	u32 stat, stat1;

	/* Read EP status, workaround HW race */
	do {
		stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	} while (stat != stat1);
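	/*
	 * Only act on the value once two consecutive reads agree, so we
	 * never use a status word sampled while the controller was in the
	 * middle of updating it.
	 */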

	/* Extract RPTR */
	d_last = VHUB_EP_DMA_RPTR(stat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
	       stat, ep->epn.is_in, ep->epn.d_last, d_last);

	/* Check all completed descriptors */
	while (ep->epn.d_last != d_last) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;
		bool is_last_desc;

		/* Grab next completed descriptor */
		d_num = ep->epn.d_last;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

		/* Grab len out of descriptor */
		len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));

		EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
		       d_num, len, req, req ? req->active : 0);

		/* If no active request pending, move on */
		if (!req || !req->active)
			continue;

		/* Adjust size */
		req->req.actual += len;

		/* Is that the last chunk ? */
		is_last_desc = req->last_desc == d_num;
		CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
					   (req->req.actual >= req->req.length &&
					    !req->req.zero)),
		      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
		      "r.len=%d r.zero=%d mp=%d\n",
		      is_last_desc, len, req->req.actual, req->req.length,
		      req->req.zero, ep->ep.maxpacket);

		if (is_last_desc) {
			/*
			 * Because we can only have one request at a time
			 * in our descriptor list in this implementation,
			 * d_last and ep->d_last should now be equal
			 */
			CHECK(ep, d_last == ep->epn.d_last,
			      "DMA read ptr mismatch %d vs %d\n",
			      d_last, ep->epn.d_last);

			/* Note: done will drop and re-acquire the lock */
			ast_vhub_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_vhub_req,
						       queue);
			break;
		}
	}

	/* More work ? */
	if (req)
		ast_vhub_epn_kick_desc(ep, req);
}

void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
	if (ep->epn.desc_mode)
		ast_vhub_epn_handle_ack_desc(ep);
	else
		ast_vhub_epn_handle_ack(ep);
}

static int ast_vhub_epn_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	bool empty;
	int rc;

	/* Paranoid checks */
	if (!u_req || !u_req->complete || !u_req->buf) {
		dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Endpoint enabled ? */
	if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
	    !ep->dev->enabled) {
		EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
		return -ESHUTDOWN;
	}

	/* Map request for DMA if possible. For now, the rule for DMA is
	 * that:
	 *
	 *  * For single stage mode (no descriptors):
	 *
	 *   - The buffer is aligned to an 8-byte boundary (HW requirement)
	 *   - For an OUT endpoint, the request size is a multiple of the EP
	 *     packet size (otherwise the controller will DMA past the end
	 *     of the buffer if the host is sending a too long packet).
	 *
	 *  * For descriptor mode (tx only for now), always.
	 *
	 * We could relax the latter by making the decision to use the bounce
	 * buffer based on the size of a given *segment* of the request rather
	 * than the whole request.
	 */
	if (ep->epn.desc_mode ||
	    ((((unsigned long)u_req->buf & 7) == 0) &&
	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
		rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
					    ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
			return rc;
		}
	} else
		u_req->dma = 0;

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
	       u_req->length, (u32)u_req->dma, u_req->zero,
	       u_req->short_not_ok, u_req->no_interrupt,
	       ep->epn.is_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->act_count = 0;
	req->active = false;
	req->last_desc = -1;
	spin_lock_irqsave(&vhub->lock, flags);
	empty = list_empty(&ep->queue);

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
				     bool restart_ep)
{
	u32 state, reg, loops;

	/* Stop DMA activity */
	if (ep->epn.desc_mode)
		writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	else
		writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Wait for it to complete */
	for (loops = 0; loops < 1000; loops++) {
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		state = VHUB_EP_DMA_PROC_STATUS(state);
		if (state == EP_DMA_PROC_RX_IDLE ||
		    state == EP_DMA_PROC_TX_IDLE)
			break;
		udelay(1);
	}
	if (loops >= 1000)
		dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");

	/* If we don't have to restart the endpoint, that's it */
	if (!restart_ep)
		return;

	/* Restart the endpoint */
	if (ep->epn.desc_mode) {
		/*
		 * Take out descriptors by resetting the DMA read
		 * pointer to be equal to the CPU write pointer.
		 *
		 * Note: If we ever support creating descriptors for
		 * requests that aren't the head of the queue, we
		 * may have to do something more complex here,
		 * especially if the request being taken out is
		 * not the current head descriptors.
		 */
		reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
			VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
		writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Then turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Single mode: just turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	}
}

static int ast_vhub_epn_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == u_req)
			break;
	}

	if (&req->req == u_req) {
		EPVDBG(ep, "dequeue req @%p active=%d\n",
		       req, req->active);
		if (req->active)
			ast_vhub_stop_active_req(ep, true);
		ast_vhub_done(ep, req, -ECONNRESET);
		rc = 0;
	}

	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}

void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
{
	u32 reg;

	if (WARN_ON(ep->d_idx == 0))
		return;
	reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
	if (ep->epn.stalled || ep->epn.wedged)
		reg |= VHUB_EP_CFG_STALL_CTRL;
	else
		reg &= ~VHUB_EP_CFG_STALL_CTRL;
	writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);

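	/*
	 * If the endpoint ends up neither stalled nor wedged, also reset
	 * its data toggle, as expected when a halt condition is cleared.
	 */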
	if (!ep->epn.stalled && !ep->epn.wedged)
		writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
		       ep->vhub->regs + AST_VHUB_EP_TOGGLE);
}

static int ast_vhub_set_halt_and_wedge(struct usb_ep *u_ep, bool halt,
				      bool wedge)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;

	EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);

	if (!u_ep || !u_ep->desc)
		return -EINVAL;
	if (ep->d_idx == 0)
		return 0;
	if (ep->epn.is_iso)
		return -EOPNOTSUPP;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Fail with still-busy IN endpoints */
	if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EAGAIN;
	}
	ep->epn.stalled = halt;
	ep->epn.wedged = wedge;
	ast_vhub_update_epn_stall(ep);

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
{
	return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
}

static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
{
	return ast_vhub_set_halt_and_wedge(u_ep, true, true);
}

static int ast_vhub_epn_disable(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	u32 imask, ep_ier;

	EPDBG(ep, "Disabling !\n");

	spin_lock_irqsave(&vhub->lock, flags);

	ep->epn.enabled = false;

	/* Stop active DMA if any */
	ast_vhub_stop_active_req(ep, false);

	/* Disable endpoint */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

	/* Disable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier &= ~imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);

	/* Nuke all pending requests */
	ast_vhub_nuke(ep, -ESHUTDOWN);

	/* No more descriptor associated with request */
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_epn_enable(struct usb_ep *u_ep,
			       const struct usb_endpoint_descriptor *desc)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub_dev *dev;
	struct ast_vhub *vhub;
	u16 maxpacket, type;
	unsigned long flags;
	u32 ep_conf, ep_ier, imask;

	/* Check arguments */
	if (!u_ep || !desc)
		return -EINVAL;

	maxpacket = usb_endpoint_maxp(desc);
	if (!ep->d_idx || !ep->dev ||
	    desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
		      ep->d_idx, ep->dev, desc->bDescriptorType,
		      maxpacket, ep->ep.maxpacket);
		return -EINVAL;
	}
	if (ep->d_idx != usb_endpoint_num(desc)) {
		EPDBG(ep, "EP number mismatch !\n");
		return -EINVAL;
	}

	if (ep->epn.enabled) {
		EPDBG(ep, "Already enabled\n");
		return -EBUSY;
	}
	dev = ep->dev;
	vhub = ep->vhub;

	/* Check device state */
	if (!dev->driver) {
		EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
		       dev->driver, dev->gadget.speed);
		return -ESHUTDOWN;
	}

	/* Grab some info from the descriptor */
	ep->epn.is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = maxpacket;
	type = usb_endpoint_type(desc);
	ep->epn.d_next = ep->epn.d_last = 0;
	ep->epn.is_iso = false;
	ep->epn.stalled = false;
	ep->epn.wedged = false;

	EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
	      ep->epn.is_in ? "in" : "out", usb_ep_type_string(type),
	      usb_endpoint_num(desc), maxpacket);

	/* Can we use DMA descriptor mode ? */
	ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
	if (ep->epn.desc_mode)
		memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);

	/*
	 * Large send function can send up to 8 packets from
	 * one descriptor with a limit of 4095 bytes.
	 */
	ep->epn.chunk_max = ep->ep.maxpacket;
	if (ep->epn.is_in) {
		ep->epn.chunk_max <<= 3;
		while (ep->epn.chunk_max > 4095)
			ep->epn.chunk_max -= ep->ep.maxpacket;
	}
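	/*
	 * e.g. maxpacket = 512: 512 * 8 = 4096 exceeds 4095, so chunk_max
	 * ends up at 3584, i.e. 7 full packets per descriptor. With
	 * maxpacket = 1024, chunk_max settles at 3072 (3 packets).
	 */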

	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		EPDBG(ep, "Only one control endpoint\n");
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
		ep->epn.is_iso = true;
		break;
	default:
		return -EINVAL;
	}

	/* Encode the rest of the EP config register */
	if (maxpacket < 1024)
		ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
	if (!ep->epn.is_in)
		ep_conf |= VHUB_EP_CFG_DIR_OUT;
	ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
	ep_conf |= VHUB_EP_CFG_ENABLE;
	ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
	EPVDBG(ep, "config=%08x\n", ep_conf);

	spin_lock_irqsave(&vhub->lock, flags);

	/* Disable HW and reset DMA */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
	writel(VHUB_EP_DMA_CTRL_RESET,
	       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Configure and enable */
	writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);

	if (ep->epn.desc_mode) {
		/* Clear DMA status, including the DMA read ptr */
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Set descriptor base */
		writel(ep->epn.descs_dma,
		       ep->epn.regs + AST_VHUB_EP_DESC_BASE);

		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
		if (ep->epn.is_in)
			ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;

		/* First reset and disable all operations */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

		/* Enable descriptor mode */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;

		/* Reset and switch to single stage mode */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	}

	/* Cleanup data toggle just in case */
	writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
	       vhub->regs + AST_VHUB_EP_TOGGLE);

	/* Cleanup and enable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier |= imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);

	/* Woot, we are online ! */
	ep->epn.enabled = true;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);

	if (WARN_ON(!ep->dev || !ep->d_idx))
		return;

	EPDBG(ep, "Releasing endpoint\n");

	/* Take it out of the EP list */
	list_del_init(&ep->ep.ep_list);

	/* Mark the address free in the device */
	ep->dev->epns[ep->d_idx - 1] = NULL;

	/* Free name & DMA buffers */
	kfree(ep->ep.name);
	ep->ep.name = NULL;
	dma_free_coherent(&ep->vhub->pdev->dev,
			  AST_VHUB_EPn_MAX_PACKET +
			  8 * AST_VHUB_DESCS_COUNT,
			  ep->buf, ep->buf_dma);
	ep->buf = NULL;
	ep->epn.descs = NULL;

	/* Mark free */
	ep->dev = NULL;
}

static const struct usb_ep_ops ast_vhub_epn_ops = {
	.enable		= ast_vhub_epn_enable,
	.disable	= ast_vhub_epn_disable,
	.dispose	= ast_vhub_epn_dispose,
	.queue		= ast_vhub_epn_queue,
	.dequeue	= ast_vhub_epn_dequeue,
	.set_halt	= ast_vhub_epn_set_halt,
	.set_wedge	= ast_vhub_epn_set_wedge,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};

struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
{
	struct ast_vhub *vhub = d->vhub;
	struct ast_vhub_ep *ep;
	unsigned long flags;
	int i;

	/* Find a free one (no device) */
	spin_lock_irqsave(&vhub->lock, flags);
	for (i = 0; i < vhub->max_epns; i++)
		if (vhub->epns[i].dev == NULL)
			break;
	if (i >= vhub->max_epns) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return NULL;
	}

	/* Set it up */
	ep = &vhub->epns[i];
	ep->dev = d;
	spin_unlock_irqrestore(&vhub->lock, flags);

	DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
	INIT_LIST_HEAD(&ep->queue);
	ep->d_idx = addr;
	ep->vhub = vhub;
	ep->ep.ops = &ast_vhub_epn_ops;
	ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
	d->epns[addr-1] = ep;
	ep->epn.g_idx = i;
	ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);

	ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
				     AST_VHUB_EPn_MAX_PACKET +
				     8 * AST_VHUB_DESCS_COUNT,
				     &ep->buf_dma, GFP_KERNEL);
	if (!ep->buf) {
		kfree(ep->ep.name);
		ep->ep.name = NULL;
		return NULL;
	}
	ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
	ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;
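	/*
	 * The single coherent allocation above is split in two: the first
	 * AST_VHUB_EPn_MAX_PACKET bytes serve as the bounce buffer for
	 * requests that can't be DMA-mapped directly, and the remaining
	 * 8 * AST_VHUB_DESCS_COUNT bytes hold the descriptor ring used in
	 * descriptor mode.
	 */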

	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
	list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
	ep->ep.caps.type_iso = true;
	ep->ep.caps.type_bulk = true;
	ep->ep.caps.type_int = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;

	return ep;
}