xref: /OK3568_Linux_fs/kernel/drivers/usb/gadget/udc/aspeed-vhub/ep0.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * ep0.c - Endpoint 0 handling
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright 2017 IBM Corporation
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or modify
10*4882a593Smuzhiyun  * it under the terms of the GNU General Public License as published by
11*4882a593Smuzhiyun  * the Free Software Foundation; either version 2 of the License, or
12*4882a593Smuzhiyun  * (at your option) any later version.
13*4882a593Smuzhiyun  */
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #include <linux/kernel.h>
16*4882a593Smuzhiyun #include <linux/module.h>
17*4882a593Smuzhiyun #include <linux/platform_device.h>
18*4882a593Smuzhiyun #include <linux/delay.h>
19*4882a593Smuzhiyun #include <linux/ioport.h>
20*4882a593Smuzhiyun #include <linux/slab.h>
21*4882a593Smuzhiyun #include <linux/errno.h>
22*4882a593Smuzhiyun #include <linux/list.h>
23*4882a593Smuzhiyun #include <linux/interrupt.h>
24*4882a593Smuzhiyun #include <linux/proc_fs.h>
25*4882a593Smuzhiyun #include <linux/prefetch.h>
26*4882a593Smuzhiyun #include <linux/clk.h>
27*4882a593Smuzhiyun #include <linux/usb/gadget.h>
28*4882a593Smuzhiyun #include <linux/of.h>
29*4882a593Smuzhiyun #include <linux/of_gpio.h>
30*4882a593Smuzhiyun #include <linux/regmap.h>
31*4882a593Smuzhiyun #include <linux/dma-mapping.h>
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #include "vhub.h"
34*4882a593Smuzhiyun 
ast_vhub_reply(struct ast_vhub_ep * ep,char * ptr,int len)35*4882a593Smuzhiyun int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
36*4882a593Smuzhiyun {
37*4882a593Smuzhiyun 	struct usb_request *req = &ep->ep0.req.req;
38*4882a593Smuzhiyun 	int rc;
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	if (WARN_ON(ep->d_idx != 0))
41*4882a593Smuzhiyun 		return std_req_stall;
42*4882a593Smuzhiyun 	if (WARN_ON(!ep->ep0.dir_in))
43*4882a593Smuzhiyun 		return std_req_stall;
44*4882a593Smuzhiyun 	if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
45*4882a593Smuzhiyun 		return std_req_stall;
46*4882a593Smuzhiyun 	if (WARN_ON(req->status == -EINPROGRESS))
47*4882a593Smuzhiyun 		return std_req_stall;
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun 	req->buf = ptr;
50*4882a593Smuzhiyun 	req->length = len;
51*4882a593Smuzhiyun 	req->complete = NULL;
52*4882a593Smuzhiyun 	req->zero = true;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	/*
55*4882a593Smuzhiyun 	 * Call internal queue directly after dropping the lock. This is
56*4882a593Smuzhiyun 	 * safe to do as the reply is always the last thing done when
57*4882a593Smuzhiyun 	 * processing a SETUP packet, usually as a tail call
58*4882a593Smuzhiyun 	 */
59*4882a593Smuzhiyun 	spin_unlock(&ep->vhub->lock);
60*4882a593Smuzhiyun 	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
61*4882a593Smuzhiyun 		rc = std_req_stall;
62*4882a593Smuzhiyun 	else
63*4882a593Smuzhiyun 		rc = std_req_data;
64*4882a593Smuzhiyun 	spin_lock(&ep->vhub->lock);
65*4882a593Smuzhiyun 	return rc;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun 
__ast_vhub_simple_reply(struct ast_vhub_ep * ep,int len,...)68*4882a593Smuzhiyun int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
69*4882a593Smuzhiyun {
70*4882a593Smuzhiyun 	u8 *buffer = ep->buf;
71*4882a593Smuzhiyun 	unsigned int i;
72*4882a593Smuzhiyun 	va_list args;
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	va_start(args, len);
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 	/* Copy data directly into EP buffer */
77*4882a593Smuzhiyun 	for (i = 0; i < len; i++)
78*4882a593Smuzhiyun 		buffer[i] = va_arg(args, int);
79*4882a593Smuzhiyun 	va_end(args);
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	/* req->buf NULL means data is already there */
82*4882a593Smuzhiyun 	return ast_vhub_reply(ep, NULL, len);
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun 
/*
 * Handle a SETUP packet received on EP0, for the vHub itself
 * (ep->dev == NULL) or a downstream gadget device.
 *
 * Reads the setup packet from the chip, dispatches it to the built-in
 * hub/device handlers or forwards it to the bound gadget driver, then
 * updates the EP0 state machine and control register accordingly.
 * Called with ep->vhub->lock held; the lock is dropped/retaken around
 * the gadget driver's ->setup() callback.
 */
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;	/* Default: stall if no gadget driver handles it */

	/* SETUP handling only makes sense on endpoint 0 */
	if (WARN_ON(ep->d_idx != 0))
		return;

	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	       le16_to_cpu(crq.wValue),
	       le16_to_cpu(crq.wIndex),
	       le16_to_cpu(crq.wLength),
	       (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	       ep->ep0.state);

	/*
	 * Check our state, cancel pending requests if needed
	 *
	 * Note: Under some circumstances, we can get a new setup
	 * packet while waiting for the stall ack, just accept it.
	 *
	 * In any case, a SETUP packet in wrong state should have
	 * reset the HW state machine, so let's just log, nuke
	 * requests, move on.
	 */
	if (ep->ep0.state != ep0_state_token &&
	    ep->ep0.state != ep0_state_stall) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, -EIO);
	}

	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		/* vHub: standard and class requests are handled in-driver */
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);

	/* Act upon result */
	switch(std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		/* Not handled internally, forward to the gadget driver below */
		break;
	case std_req_data:
		/* Data phase already started by the handler (ast_vhub_reply) */
		return;
	}

	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		/* Drop the lock across the callback: the driver may queue
		 * a request (which re-takes the lock) from ->setup().
		 */
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else {
		EPDBG(ep, "no gadget for request !\n");
	}
	/* rc >= 0: the driver accepted the request and owns the data phase */
	if (rc >= 0)
		return;

 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_stall;
	ep->ep0.dir_in = false;
	return;

 complete:
	/* Request fully handled internally: send a 0-length IN status */
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 
/*
 * Push the next chunk of an EP0 IN transfer into the hardware, or
 * complete the request when the last chunk has been acked.
 *
 * Called both to kick off a freshly queued IN request and from the
 * ack interrupt path for each subsequent packet. Caller holds
 * ep->vhub->lock.
 */
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/* If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		/* Move to status phase and arm the OUT (status) side */
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		/* Short packet, or exact length with no ZLP requested */
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

	vhub_dma_workaround(ep->buf);

	/* Remember chunk size and trigger send */
	/* Two writes: length first, then length + buffer-ready bit */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}
232*4882a593Smuzhiyun 
/*
 * Arm EP0 to receive the next OUT data packet into the EP buffer.
 * Caller holds ep->vhub->lock.
 */
static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}
240*4882a593Smuzhiyun 
ast_vhub_ep0_do_receive(struct ast_vhub_ep * ep,struct ast_vhub_req * req,unsigned int len)241*4882a593Smuzhiyun static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
242*4882a593Smuzhiyun 				    unsigned int len)
243*4882a593Smuzhiyun {
244*4882a593Smuzhiyun 	unsigned int remain;
245*4882a593Smuzhiyun 	int rc = 0;
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 	/* We are receiving... grab request */
248*4882a593Smuzhiyun 	remain = req->req.length - req->req.actual;
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	/* Are we getting more than asked ? */
253*4882a593Smuzhiyun 	if (len > remain) {
254*4882a593Smuzhiyun 		EPDBG(ep, "receiving too much (ovf: %d) !\n",
255*4882a593Smuzhiyun 		      len - remain);
256*4882a593Smuzhiyun 		len = remain;
257*4882a593Smuzhiyun 		rc = -EOVERFLOW;
258*4882a593Smuzhiyun 	}
259*4882a593Smuzhiyun 	if (len && req->req.buf)
260*4882a593Smuzhiyun 		memcpy(req->req.buf + req->req.actual, ep->buf, len);
261*4882a593Smuzhiyun 	req->req.actual += len;
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	/* Done ? */
264*4882a593Smuzhiyun 	if (len < ep->ep.maxpacket || len == remain) {
265*4882a593Smuzhiyun 		ep->ep0.state = ep0_state_status;
266*4882a593Smuzhiyun 		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
267*4882a593Smuzhiyun 		ast_vhub_done(ep, req, rc);
268*4882a593Smuzhiyun 	} else
269*4882a593Smuzhiyun 		ast_vhub_ep0_rx_prime(ep);
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun 
/*
 * Handle an EP0 ack interrupt (TX or RX buffer consumed by the host).
 *
 * Advances the EP0 state machine: continues data transfers, completes
 * the status phase, and stalls the endpoint on any protocol-level
 * inconsistency. @in_ack tells which direction the hardware acked.
 * Called with ep->vhub->lock held.
 */
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
		stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch(ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		/* An ack here is unexpected: stall to resync with the host */
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			/* In that case, ignore interrupt */
			/* NOTE(review): message lacks a trailing newline */
			dev_warn(dev, "irq state mismatch");
			break;
		}
		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		/* These handlers set the next state themselves, so return */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		/* Status direction is opposite of the data phase direction */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
		break;
	case ep0_state_stall:
		/*
		 * There shouldn't be any request left, but nuke just in case
		 * otherwise the stale request will block subsequent ones
		 */
		ast_vhub_nuke(ep, -EIO);
		break;
	}

	/* Reset to token state or stall */
	if (stall) {
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_stall;
	} else
		ep->ep0.state = ep0_state_token;
}
357*4882a593Smuzhiyun 
/*
 * usb_ep_ops.queue implementation for EP0.
 *
 * EP0 only supports one request at a time, and the request must match
 * the phase established by the preceding SETUP packet: an IN request
 * starts the send state machine, a 0-length OUT request completes as
 * status, and a non-0 OUT request primes the receiver.
 *
 * Returns 0 on success, -EINVAL on a malformed request, -ESHUTDOWN if
 * the device is disabled, -EBUSY if EP0 is in the wrong state.
 */
static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	/* Only internal requests may omit a completion callback */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && !ep->dev->enabled)
		return -ESHUTDOWN;

	/* Data, no buffer and not internal ? */
	/* (internal requests keep their data in the EP buffer directly) */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, "  l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	/* A queue in TOKEN or STALL state means no SETUP expects data */
	if (!list_empty(&ep->queue) ||
	    ep->ep0.state == ep0_state_token ||
	    ep->ep0.state == ep0_state_stall) {
		dev_warn(dev, "EP0: Request in wrong state\n");
	        EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
		       list_empty(&ep->queue), ep->ep0.state);
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
436*4882a593Smuzhiyun 
ast_vhub_ep0_dequeue(struct usb_ep * u_ep,struct usb_request * u_req)437*4882a593Smuzhiyun static int ast_vhub_ep0_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
438*4882a593Smuzhiyun {
439*4882a593Smuzhiyun 	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
440*4882a593Smuzhiyun 	struct ast_vhub *vhub = ep->vhub;
441*4882a593Smuzhiyun 	struct ast_vhub_req *req;
442*4882a593Smuzhiyun 	unsigned long flags;
443*4882a593Smuzhiyun 	int rc = -EINVAL;
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	spin_lock_irqsave(&vhub->lock, flags);
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun 	/* Only one request can be in the queue */
448*4882a593Smuzhiyun 	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	/* Is it ours ? */
451*4882a593Smuzhiyun 	if (req && u_req == &req->req) {
452*4882a593Smuzhiyun 		EPVDBG(ep, "dequeue req @%p\n", req);
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun 		/*
455*4882a593Smuzhiyun 		 * We don't have to deal with "active" as all
456*4882a593Smuzhiyun 		 * DMAs go to the EP buffers, not the request.
457*4882a593Smuzhiyun 		 */
458*4882a593Smuzhiyun 		ast_vhub_done(ep, req, -ECONNRESET);
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 		/* We do stall the EP to clean things up in HW */
461*4882a593Smuzhiyun 		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
462*4882a593Smuzhiyun 		ep->ep0.state = ep0_state_status;
463*4882a593Smuzhiyun 		ep->ep0.dir_in = false;
464*4882a593Smuzhiyun 		rc = 0;
465*4882a593Smuzhiyun 	}
466*4882a593Smuzhiyun 	spin_unlock_irqrestore(&vhub->lock, flags);
467*4882a593Smuzhiyun 	return rc;
468*4882a593Smuzhiyun }
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun static const struct usb_ep_ops ast_vhub_ep0_ops = {
472*4882a593Smuzhiyun 	.queue		= ast_vhub_ep0_queue,
473*4882a593Smuzhiyun 	.dequeue	= ast_vhub_ep0_dequeue,
474*4882a593Smuzhiyun 	.alloc_request	= ast_vhub_alloc_request,
475*4882a593Smuzhiyun 	.free_request	= ast_vhub_free_request,
476*4882a593Smuzhiyun };
477*4882a593Smuzhiyun 
/*
 * Reset a device's EP0 software state: fail any pending request with
 * -EIO and return the state machine to TOKEN (waiting for SETUP).
 */
void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
{
	struct ast_vhub_ep *ep = &dev->ep0;

	ast_vhub_nuke(ep, -EIO);
	ep->ep0.state = ep0_state_token;
}
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun 
ast_vhub_init_ep0(struct ast_vhub * vhub,struct ast_vhub_ep * ep,struct ast_vhub_dev * dev)487*4882a593Smuzhiyun void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
488*4882a593Smuzhiyun 		       struct ast_vhub_dev *dev)
489*4882a593Smuzhiyun {
490*4882a593Smuzhiyun 	memset(ep, 0, sizeof(*ep));
491*4882a593Smuzhiyun 
492*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ep->ep.ep_list);
493*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ep->queue);
494*4882a593Smuzhiyun 	ep->ep.ops = &ast_vhub_ep0_ops;
495*4882a593Smuzhiyun 	ep->ep.name = "ep0";
496*4882a593Smuzhiyun 	ep->ep.caps.type_control = true;
497*4882a593Smuzhiyun 	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
498*4882a593Smuzhiyun 	ep->d_idx = 0;
499*4882a593Smuzhiyun 	ep->dev = dev;
500*4882a593Smuzhiyun 	ep->vhub = vhub;
501*4882a593Smuzhiyun 	ep->ep0.state = ep0_state_token;
502*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ep->ep0.req.queue);
503*4882a593Smuzhiyun 	ep->ep0.req.internal = true;
504*4882a593Smuzhiyun 
505*4882a593Smuzhiyun 	/* Small difference between vHub and devices */
506*4882a593Smuzhiyun 	if (dev) {
507*4882a593Smuzhiyun 		ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
508*4882a593Smuzhiyun 		ep->ep0.setup = vhub->regs +
509*4882a593Smuzhiyun 			AST_VHUB_SETUP0 + 8 * (dev->index + 1);
510*4882a593Smuzhiyun 		ep->buf = vhub->ep0_bufs +
511*4882a593Smuzhiyun 			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
512*4882a593Smuzhiyun 		ep->buf_dma = vhub->ep0_bufs_dma +
513*4882a593Smuzhiyun 			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
514*4882a593Smuzhiyun 	} else {
515*4882a593Smuzhiyun 		ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
516*4882a593Smuzhiyun 		ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
517*4882a593Smuzhiyun 		ep->buf = vhub->ep0_bufs;
518*4882a593Smuzhiyun 		ep->buf_dma = vhub->ep0_bufs_dma;
519*4882a593Smuzhiyun 	}
520*4882a593Smuzhiyun }
521