// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for PLX NET2272 USB device controller
 *
 * Copyright (C) 2005-2006 PLX Technology, Inc.
 * Copyright (C) 2006-2011 Analog Devices, Inc.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include <asm/byteorder.h>
#include <asm/unaligned.h>

#include "net2272.h"

#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"

static const char driver_name[] = "net2272";
static const char driver_vers[] = "2006 October 17/mainline";
static const char driver_desc[] = DRIVER_DESC;

static const char ep0name[] = "ep0";
static const char * const ep_name[] = {
	ep0name,
	"ep-a", "ep-b", "ep-c",
};

#ifdef CONFIG_USB_NET2272_DMA
/*
 * use_dma: the NET2272 can use an external DMA controller.
 * Note that since there is no generic DMA api, some functions,
 * notably request_dma, start_dma, and cancel_dma will need to be
 * modified for your platform's particular dma controller.
 *
 * If use_dma is disabled, pio will be used instead.
 */
static bool use_dma = false;
module_param(use_dma, bool, 0644);

/*
 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
 * The NET2272 can only use dma for a single endpoint at a time.
 * At some point this could be modified to allow either endpoint
 * to take control of dma as it becomes available.
 *
 * Note that DMA should not be used on OUT endpoints unless it can
 * be guaranteed that no short packets will arrive on an IN endpoint
 * while the DMA operation is pending.  Otherwise the OUT DMA will
 * terminate prematurely (See NET2272 Errata 630-0213-0101)
 */
static ushort dma_ep = 1;
module_param(dma_ep, ushort, 0644);

/*
 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
 *	mode 0 == Slow DREQ mode
 *	mode 1 == Fast DREQ mode
 *	mode 2 == Burst mode
 */
static ushort dma_mode = 2;
module_param(dma_mode, ushort, 0644);
#else
#define use_dma 0
#define dma_ep 1
#define dma_mode 2
#endif
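/*
 * Note: without CONFIG_USB_NET2272_DMA the three symbols above become
 * compile-time constants, so the compiler can drop every "if (use_dma)"
 * path and the driver always runs in PIO mode.
 */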

/*
 * fifo_mode: net2272 buffer configuration:
 *      mode 0 == ep-{a,b,c} 512db each
 *      mode 1 == ep-a 1k, ep-{b,c} 512db
 *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
 *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
 */
static ushort fifo_mode = 0;
module_param(fifo_mode, ushort, 0644);

/*
 * enable_suspend: When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2272.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.  For bus-powered devices set this to 1.
 */
static ushort enable_suspend = 0;
module_param(enable_suspend, ushort, 0644);
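
/*
 * Illustrative usage only (not part of the original driver comments):
 * the parameters above are ordinary module parameters, so they can be
 * given at load time and, since they are created with mode 0644, changed
 * afterwards through sysfs, e.g.:
 *
 *   modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2 fifo_mode=1
 *   echo 1 > /sys/module/net2272/parameters/enable_suspend
 *
 * use_dma, dma_ep and dma_mode only exist when the driver is built with
 * CONFIG_USB_NET2272_DMA.
 */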

static void assert_out_naking(struct net2272_ep *ep, const char *where)
{
	u8 tmp;

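	/* the NAK-state sanity check below only runs when DEBUG is defined */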
#ifndef DEBUG
	return;
#endif

	tmp = net2272_ep_read(ep, EP_STAT0);
	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
			ep->ep.name, where, tmp);
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
	}
}
#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)

static void stop_out_naking(struct net2272_ep *ep)
{
	u8 tmp = net2272_ep_read(ep, EP_STAT0);

	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
}

#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")

static char *type_string(u8 bmAttributes)
{
	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK: return "bulk";
	case USB_ENDPOINT_XFER_ISOC: return "iso";
	case USB_ENDPOINT_XFER_INT:  return "intr";
	default:                     return "control";
	}
}

static char *buf_state_string(unsigned state)
{
	switch (state) {
	case BUFF_FREE:  return "free";
	case BUFF_VALID: return "valid";
	case BUFF_LCL:   return "local";
	case BUFF_USB:   return "usb";
	default:         return "unknown";
	}
}

static char *dma_mode_string(void)
{
	if (!use_dma)
		return "PIO";
	switch (dma_mode) {
	case 0:  return "SLOW DREQ";
	case 1:  return "FAST DREQ";
	case 2:  return "BURST";
	default: return "invalid";
	}
}

static void net2272_dequeue_all(struct net2272_ep *);
static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
static int net2272_fifo_status(struct usb_ep *);

static const struct usb_ep_ops net2272_ep_ops;

/*---------------------------------------------------------------------------*/

static int
net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2272 *dev;
	struct net2272_ep *ep;
	u32 max;
	u8 tmp;
	unsigned long flags;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	max = usb_endpoint_maxp(desc);

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max;
	ep->desc = desc;

	/* net2272_ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;

	/* set speed-dependent max packet */
	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);

	/* set type, direction, address; reset fifo counters */
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
	tmp = usb_endpoint_type(desc);
	if (usb_endpoint_xfer_bulk(desc)) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
	tmp <<= ENDPOINT_TYPE;
	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
	tmp |= (1 << ENDPOINT_ENABLE);

	/* for OUT transfers, block the rx fifo until a read is posted */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (!ep->is_in)
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	net2272_ep_write(ep, EP_CFG, tmp);

	/* enable irqs */
	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
	net2272_write(dev, IRQENB0, tmp);

	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB);
	net2272_ep_write(ep, EP_IRQENB, tmp);

	tmp = desc->bEndpointAddress;
	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
		type_string(desc->bmAttributes), max,
		net2272_ep_read(ep, EP_CFG));

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

static void net2272_ep_reset(struct net2272_ep *ep)
{
	u8 tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2272_ep_ops;

	/* disable irqs, endpoint */
	net2272_ep_write(ep, EP_IRQENB, 0);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read.
	 */
	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
	net2272_ep_write(ep, EP_RSPSET, tmp);

	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
	if (ep->num != 0)
		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);

	net2272_ep_write(ep, EP_RSPCLR, tmp);

	/* scrub most status bits, and flush any fifo state */
	net2272_ep_write(ep, EP_STAT0,
			  (1 << DATA_IN_TOKEN_INTERRUPT)
			| (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));

	net2272_ep_write(ep, EP_STAT1,
			    (1 << TIMEOUT)
			  | (1 << USB_OUT_ACK_SENT)
			  | (1 << USB_OUT_NAK_SENT)
			  | (1 << USB_IN_ACK_RCVD)
			  | (1 << USB_IN_NAK_SENT)
			  | (1 << USB_STALL_SENT)
			  | (1 << LOCAL_OUT_ZLP)
			  | (1 << BUFFER_FLUSH));

	/* fifo size is handled separately */
}

static int net2272_disable(struct usb_ep *_ep)
{
	struct net2272_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	net2272_dequeue_all(ep);
	net2272_ep_reset(ep);

	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*---------------------------------------------------------------------------*/

static struct usb_request *
net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2272_request *req;

	if (!_ep)
		return NULL;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void
net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_request *req;

	if (!_ep || !_req)
		return;

	req = container_of(_req, struct net2272_request, req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}

static void
net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
{
	struct net2272 *dev;
	unsigned stopped = ep->stopped;

	if (ep->num == 0) {
		if (ep->dev->protocol_stall) {
			ep->stopped = 1;
			set_halt(ep);
		}
		allow_status(ep);
	}

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (use_dma && ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req,
				ep->is_in);

	if (status && status != -ESHUTDOWN)
		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length, req->req.buf);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}

static int
net2272_write_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned max)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	u16 *bufp;
	unsigned length, count;
	u8 tmp;

	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
		ep->ep.name, req, max, length,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	count = length;
	bufp = (u16 *)buf;

	while (likely(count >= 2)) {
		/* no byte-swap required; chip endian set during init */
		writew(*bufp++, ep_data);
		count -= 2;
	}
	buf = (u8 *)bufp;

	/* write final byte by placing the NET2272 into 8-bit mode */
	if (unlikely(count)) {
		tmp = net2272_read(ep->dev, LOCCTL);
		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
		writeb(*buf, ep_data);
		net2272_write(ep->dev, LOCCTL, tmp);
	}
	return length;
}

/* returns: 0: still running, 1: completed, negative: errno */
static int
net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
	u8 *buf;
	unsigned count, max;
	int status;

	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

	/*
	 * Keep loading the endpoint until the final packet is loaded,
	 * or the endpoint buffer is full.
	 */
 top:
	/*
	 * Clear interrupt status
	 *  - Packet Transmitted interrupt will become set again when the
	 *    host successfully takes another packet
	 */
	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
		buf = req->req.buf + req->req.actual;
		prefetch(buf);

		/* force pagesel */
		net2272_ep_read(ep, EP_STAT0);

		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
			(net2272_ep_read(ep, EP_AVAIL0));

		if (max < ep->ep.maxpacket)
			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
				| (net2272_ep_read(ep, EP_AVAIL0));

		count = net2272_write_packet(ep, buf, req, max);
		/* see if we are done */
		if (req->req.length == req->req.actual) {
			/* validate short or zlp packet */
			if (count < ep->ep.maxpacket)
				set_fifo_bytecount(ep, 0);
			net2272_done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct net2272_request,
						queue);
				status = net2272_kick_dma(ep, req);

				if (status < 0)
					if ((net2272_ep_read(ep, EP_STAT0)
							& (1 << BUFFER_EMPTY)))
						goto top;
			}
			return 1;
		}
		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
	}
	return 0;
}

static void
net2272_out_flush(struct net2272_ep *ep)
{
	ASSERT_OUT_NAKING(ep);

	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
}

static int
net2272_read_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned avail)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	unsigned is_short;
	u16 *bufp;

	req->req.actual += avail;

	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
		ep->ep.name, req, avail,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	is_short = (avail < ep->ep.maxpacket);

	if (unlikely(avail == 0)) {
		/* remove any zlp from the buffer */
		(void)readw(ep_data);
		return is_short;
	}

	/* Ensure we get the final byte */
	if (unlikely(avail % 2))
		avail++;
	bufp = (u16 *)buf;

	do {
		*bufp++ = readw(ep_data);
		avail -= 2;
	} while (avail);

	/*
	 * To avoid a false endpoint-available race condition, EP_STAT0
	 * must be read twice in the case of a short transfer
	 */
	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
		net2272_ep_read(ep, EP_STAT0);

	return is_short;
}

static int
net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
	u8 *buf;
	unsigned is_short;
	int count;
	int tmp;
	int cleanup = 0;
	int status = -1;

	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

 top:
	do {
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);

		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
			| net2272_ep_read(ep, EP_AVAIL0);

		net2272_ep_write(ep, EP_STAT0,
			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
			(1 << DATA_PACKET_RECEIVED_INTERRUPT));

		tmp = req->req.length - req->req.actual;

		if (count > tmp) {
			if ((tmp % ep->ep.maxpacket) != 0) {
				dev_err(ep->dev->dev,
					"%s out fifo %d bytes, expected %d\n",
					ep->ep.name, count, tmp);
				cleanup = 1;
			}
			count = (tmp > 0) ? tmp : 0;
		}

		is_short = net2272_read_packet(ep, buf, req, count);

		/* completion */
		if (unlikely(cleanup || is_short ||
				req->req.actual == req->req.length)) {

			if (cleanup) {
				net2272_out_flush(ep);
				net2272_done(ep, req, -EOVERFLOW);
			} else
				net2272_done(ep, req, 0);

			/* re-initialize endpoint transfer registers
			 * otherwise they may result in erroneous pre-validation
			 * for subsequent control reads
			 */
			if (unlikely(ep->num == 0)) {
				net2272_ep_write(ep, EP_TRANSFER2, 0);
				net2272_ep_write(ep, EP_TRANSFER1, 0);
				net2272_ep_write(ep, EP_TRANSFER0, 0);
			}

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct net2272_request, queue);
				status = net2272_kick_dma(ep, req);
				if ((status < 0) &&
				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
					goto top;
			}
			return 1;
		}
	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));

	return 0;
}

static void
net2272_pio_advance(struct net2272_ep *ep)
{
	struct net2272_request *req;

	if (unlikely(list_empty(&ep->queue)))
		return;

	req = list_entry(ep->queue.next, struct net2272_request, queue);
	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
}

/* returns 0 on success, else negative errno */
static int
net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
	unsigned len, unsigned dir)
{
	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
		ep, buf, len, dir);

	/* The NET2272 only supports a single dma channel */
	if (dev->dma_busy)
		return -EBUSY;
	/*
	 * EP_TRANSFER (used to determine the number of bytes received
	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
	 */
	if ((dir == 1) && (len > 0x1000000))
		return -EINVAL;

	dev->dma_busy = 1;

	/* initialize platform's dma */
#ifdef CONFIG_USB_PCI
	/* NET2272 addr, buffer addr, length, etc. */
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		/* Setup PLX 9054 DMA mode */
		writel((1 << LOCAL_BUS_WIDTH) |
			(1 << TA_READY_INPUT_ENABLE) |
			(0 << LOCAL_BURST_ENABLE) |
			(1 << DONE_INTERRUPT_ENABLE) |
			(1 << LOCAL_ADDRESSING_MODE) |
			(1 << DEMAND_MODE) |
			(1 << DMA_EOT_ENABLE) |
			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
			dev->rdk1.plx9054_base_addr + DMAMODE0);

		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
		writel((dir << DIRECTION_OF_TRANSFER) |
			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
			dev->rdk1.plx9054_base_addr + DMADPR0);
		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
			readl(dev->rdk1.plx9054_base_addr + INTCSR),
			dev->rdk1.plx9054_base_addr + INTCSR);

		break;
	}
#endif

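	/*
	 * Arm the NET2272's own DMA request logic.  The endpoint number
	 * (1 = ep-a, 2 = ep-b) is shifted right by one to form the
	 * DMA_ENDPOINT_SELECT field value.
	 */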
	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(1 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((ep >> 1) << DMA_ENDPOINT_SELECT));

	(void) net2272_read(dev, SCRATCH);

	return 0;
}

static void
net2272_start_dma(struct net2272 *dev)
{
	/* start platform's dma controller */
#ifdef CONFIG_USB_PCI
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
			dev->rdk1.plx9054_base_addr + DMACSR0);
		break;
	}
#endif
}

/* returns 0 on success, else negative errno */
static int
net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
{
	unsigned size;
	u8 tmp;

	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
		return -EINVAL;

	/* don't use dma for odd-length transfers
	 * otherwise, we'd need to deal with the last byte with pio
	 */
	if (req->req.length & 1)
		return -EINVAL;

	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
		ep->ep.name, req, (unsigned long long) req->req.dma);

	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	/* The NET2272 can only use DMA on one endpoint at a time */
	if (ep->dev->dma_busy)
		return -EBUSY;

	/* Make sure we only DMA an even number of bytes (we'll use
	 * pio to complete the transfer)
	 */
	size = req->req.length;
	size &= ~1;

	/* device-to-host transfer */
	if (ep->is_in) {
		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;
		req->req.actual += size;

	/* host-to-device transfer */
	} else {
		tmp = net2272_ep_read(ep, EP_STAT0);

		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;

		if (!(tmp & (1 << BUFFER_EMPTY)))
			ep->not_empty = 1;
		else
			ep->not_empty = 0;


		/* allow the endpoint's buffer to fill */
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);

		/* this transfer completed and data's already in the fifo
		 * return error so pio gets used.
		 */
		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {

			/* deassert dreq */
			net2272_write(ep->dev, DMAREQ,
				(0 << DMA_BUFFER_VALID) |
				(0 << DMA_REQUEST_ENABLE) |
				(1 << DMA_CONTROL_DACK) |
				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
				((ep->num >> 1) << DMA_ENDPOINT_SELECT));

			return -EBUSY;
		}
	}

	/* Don't use per-packet interrupts: use dma interrupts only */
	net2272_ep_write(ep, EP_IRQENB, 0);

	net2272_start_dma(ep->dev);

	return 0;
}

static void net2272_cancel_dma(struct net2272 *dev)
{
#ifdef CONFIG_USB_PCI
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
		         (1 << CHANNEL_DONE)))
			continue;	/* wait for dma to stabilize */

		/* dma abort generates an interrupt */
		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
			dev->rdk1.plx9054_base_addr + DMACSR0);
		break;
	}
#endif

	dev->dma_busy = 0;
}

/*---------------------------------------------------------------------------*/

static int
net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2272_request *req;
	struct net2272_ep *ep;
	struct net2272 *dev;
	unsigned long flags;
	int status = -1;
	u8 s;

	req = container_of(_req, struct net2272_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))
		return -EINVAL;
	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* set up dma mapping in case the caller didn't */
	if (use_dma && ep->dma) {
		status = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (status)
			return status;
	}

	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
		_ep->name, _req, _req->length, _req->buf,
		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		/* maybe there's no control data, just status ack */
		if (ep->num == 0 && _req->length == 0) {
			net2272_done(ep, req, 0);
			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
			goto done;
		}

		/* Return zlp, don't let it block subsequent packets */
		s = net2272_ep_read(ep, EP_STAT0);
		if (s & (1 << BUFFER_EMPTY)) {
			/* Buffer is empty; check for a blocking zlp and handle it */
			if ((s & (1 << NAK_OUT_PACKETS)) &&
			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
				/*
				 * Request is going to terminate with a short packet ...
				 * hope the client is ready for it!
				 */
				status = net2272_read_fifo(ep, req);
				/* clear short packet naking */
				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
				goto done;
			}
		}

		/* try dma first */
		status = net2272_kick_dma(ep, req);

		if (status < 0) {
			/* dma failed (most likely in use by another endpoint)
			 * fallback to pio
			 */
			status = 0;

			if (ep->is_in)
				status = net2272_write_fifo(ep, req);
			else {
				s = net2272_ep_read(ep, EP_STAT0);
				if ((s & (1 << BUFFER_EMPTY)) == 0)
					status = net2272_read_fifo(ep, req);
			}

			if (unlikely(status != 0)) {
				if (status > 0)
					status = 0;
				req = NULL;
			}
		}
	}
	if (likely(req))
		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue)))
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 done:
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/* dequeue ALL requests */
static void
net2272_dequeue_all(struct net2272_ep *ep)
{
	struct net2272_request *req;

	/* called with spinlock held */
	ep->stopped = 1;

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct net2272_request,
				queue);
		net2272_done(ep, req, -ESHUTDOWN);
	}
}

/* dequeue JUST ONE request */
static int
net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_ep *ep;
	struct net2272_request *req;
	unsigned long flags;
	int stopped;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	stopped = ep->stopped;
	ep->stopped = 1;

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ep->stopped = stopped;
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete */
	if (ep->queue.next == &req->queue) {
		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
		net2272_done(ep, req, -ECONNRESET);
	}
	req = NULL;
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*---------------------------------------------------------------------------*/

static int
net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2272_ep *ep;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		ret = -EAGAIN;
	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
		ret = -EAGAIN;
	else {
		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
			value ? "set" : "clear",
			wedged ? "wedge" : "halt");
		/* set/clear */
		if (value) {
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt(ep);
			ep->wedged = 0;
		}
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return ret;
}

static int
net2272_set_halt(struct usb_ep *_ep, int value)
{
	return net2272_set_halt_and_wedge(_ep, value, 0);
}

static int
net2272_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2272_set_halt_and_wedge(_ep, 1, 1);
}

static int
net2272_fifo_status(struct usb_ep *_ep)
{
	struct net2272_ep *ep;
	u16 avail;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -ENODEV;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
	avail |= net2272_ep_read(ep, EP_AVAIL0);
	if (avail > ep->fifo_size)
		return -EOVERFLOW;
	if (ep->is_in)
		avail = ep->fifo_size - avail;
	return avail;
}

static void
net2272_fifo_flush(struct usb_ep *_ep)
{
	struct net2272_ep *ep;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
}

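/*
 * Endpoint operations exposed to the gadget core; function drivers reach
 * them indirectly through the usb_ep_*() wrappers (usb_ep_enable(),
 * usb_ep_queue(), and friends).
 */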
static const struct usb_ep_ops net2272_ep_ops = {
	.enable        = net2272_enable,
	.disable       = net2272_disable,

	.alloc_request = net2272_alloc_request,
	.free_request  = net2272_free_request,

	.queue         = net2272_queue,
	.dequeue       = net2272_dequeue,

	.set_halt      = net2272_set_halt,
	.set_wedge     = net2272_set_wedge,
	.fifo_status   = net2272_fifo_status,
	.fifo_flush    = net2272_fifo_flush,
};

/*---------------------------------------------------------------------------*/

static int
net2272_get_frame(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	unsigned long flags;
	u16 ret;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2272, gadget);
	spin_lock_irqsave(&dev->lock, flags);

	ret = net2272_read(dev, FRAME1) << 8;
	ret |= net2272_read(dev, FRAME0);

	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;
}

static int
net2272_wakeup(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	u8 tmp;
	unsigned long flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = net2272_read(dev, USBCTL0);
	if (tmp & (1 << IO_WAKEUP_ENABLE))
		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int
net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	if (!_gadget)
		return -ENODEV;

	_gadget->is_selfpowered = (value != 0);

	return 0;
}

static int
net2272_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2272 *dev;
	u8 tmp;
	unsigned long flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = net2272_read(dev, USBCTL0);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= (1 << USB_DETECT_ENABLE);
	else
		tmp &= ~(1 << USB_DETECT_ENABLE);
	net2272_write(dev, USBCTL0, tmp);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2272_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);
static int net2272_stop(struct usb_gadget *_gadget);

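/*
 * Gadget-level operations; the UDC core calls udc_start/udc_stop when a
 * function driver binds or unbinds, and the remaining hooks are reached
 * through the usb_gadget_*() helpers (usb_gadget_connect(),
 * usb_gadget_wakeup(), ...).
 */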
1153*4882a593Smuzhiyun static const struct usb_gadget_ops net2272_ops = {
1154*4882a593Smuzhiyun 	.get_frame	= net2272_get_frame,
1155*4882a593Smuzhiyun 	.wakeup		= net2272_wakeup,
1156*4882a593Smuzhiyun 	.set_selfpowered = net2272_set_selfpowered,
1157*4882a593Smuzhiyun 	.pullup		= net2272_pullup,
1158*4882a593Smuzhiyun 	.udc_start	= net2272_start,
1159*4882a593Smuzhiyun 	.udc_stop	= net2272_stop,
1160*4882a593Smuzhiyun };
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun static ssize_t
1165*4882a593Smuzhiyun registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1166*4882a593Smuzhiyun {
1167*4882a593Smuzhiyun 	struct net2272 *dev;
1168*4882a593Smuzhiyun 	char *next;
1169*4882a593Smuzhiyun 	unsigned size, t;
1170*4882a593Smuzhiyun 	unsigned long flags;
1171*4882a593Smuzhiyun 	u8 t1, t2;
1172*4882a593Smuzhiyun 	int i;
1173*4882a593Smuzhiyun 	const char *s;
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	dev = dev_get_drvdata(_dev);
1176*4882a593Smuzhiyun 	next = buf;
1177*4882a593Smuzhiyun 	size = PAGE_SIZE;
1178*4882a593Smuzhiyun 	spin_lock_irqsave(&dev->lock, flags);
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	/* Main Control Registers */
1181*4882a593Smuzhiyun 	t = scnprintf(next, size, "%s version %s,"
1182*4882a593Smuzhiyun 		"chiprev %02x, locctl %02x\n"
1183*4882a593Smuzhiyun 		"irqenb0 %02x irqenb1 %02x "
1184*4882a593Smuzhiyun 		"irqstat0 %02x irqstat1 %02x\n",
1185*4882a593Smuzhiyun 		driver_name, driver_vers, dev->chiprev,
1186*4882a593Smuzhiyun 		net2272_read(dev, LOCCTL),
1187*4882a593Smuzhiyun 		net2272_read(dev, IRQENB0),
1188*4882a593Smuzhiyun 		net2272_read(dev, IRQENB1),
1189*4882a593Smuzhiyun 		net2272_read(dev, IRQSTAT0),
1190*4882a593Smuzhiyun 		net2272_read(dev, IRQSTAT1));
1191*4882a593Smuzhiyun 	size -= t;
1192*4882a593Smuzhiyun 	next += t;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	/* DMA */
1195*4882a593Smuzhiyun 	t1 = net2272_read(dev, DMAREQ);
1196*4882a593Smuzhiyun 	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1197*4882a593Smuzhiyun 		t1, ep_name[(t1 & 0x01) + 1],
1198*4882a593Smuzhiyun 		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1199*4882a593Smuzhiyun 		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1200*4882a593Smuzhiyun 		t1 & (1 << DMA_REQUEST) ? "req " : "",
1201*4882a593Smuzhiyun 		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1202*4882a593Smuzhiyun 	size -= t;
1203*4882a593Smuzhiyun 	next += t;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	/* USB Control Registers */
1206*4882a593Smuzhiyun 	t1 = net2272_read(dev, USBCTL1);
1207*4882a593Smuzhiyun 	if (t1 & (1 << VBUS_PIN)) {
1208*4882a593Smuzhiyun 		if (t1 & (1 << USB_HIGH_SPEED))
1209*4882a593Smuzhiyun 			s = "high speed";
1210*4882a593Smuzhiyun 		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1211*4882a593Smuzhiyun 			s = "powered";
1212*4882a593Smuzhiyun 		else
1213*4882a593Smuzhiyun 			s = "full speed";
1214*4882a593Smuzhiyun 	} else
1215*4882a593Smuzhiyun 		s = "not attached";
1216*4882a593Smuzhiyun 	t = scnprintf(next, size,
1217*4882a593Smuzhiyun 		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1218*4882a593Smuzhiyun 		net2272_read(dev, USBCTL0), t1,
1219*4882a593Smuzhiyun 		net2272_read(dev, OURADDR), s);
1220*4882a593Smuzhiyun 	size -= t;
1221*4882a593Smuzhiyun 	next += t;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	/* Endpoint Registers */
1224*4882a593Smuzhiyun 	for (i = 0; i < 4; ++i) {
1225*4882a593Smuzhiyun 		struct net2272_ep *ep;
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 		ep = &dev->ep[i];
1228*4882a593Smuzhiyun 		if (i && !ep->desc)
1229*4882a593Smuzhiyun 			continue;
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 		t1 = net2272_ep_read(ep, EP_CFG);
1232*4882a593Smuzhiyun 		t2 = net2272_ep_read(ep, EP_RSPSET);
1233*4882a593Smuzhiyun 		t = scnprintf(next, size,
1234*4882a593Smuzhiyun 			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1235*4882a593Smuzhiyun 			"irqenb %02x\n",
1236*4882a593Smuzhiyun 			ep->ep.name, t1, t2,
1237*4882a593Smuzhiyun 			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1238*4882a593Smuzhiyun 			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1239*4882a593Smuzhiyun 			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1240*4882a593Smuzhiyun 			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1241*4882a593Smuzhiyun 			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1242*4882a593Smuzhiyun 			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1243*4882a593Smuzhiyun 			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1244*4882a593Smuzhiyun 			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1245*4882a593Smuzhiyun 			net2272_ep_read(ep, EP_IRQENB));
1246*4882a593Smuzhiyun 		size -= t;
1247*4882a593Smuzhiyun 		next += t;
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 		t = scnprintf(next, size,
1250*4882a593Smuzhiyun 			"\tstat0 %02x stat1 %02x avail %04x "
1251*4882a593Smuzhiyun 			"(ep%d%s-%s)%s\n",
1252*4882a593Smuzhiyun 			net2272_ep_read(ep, EP_STAT0),
1253*4882a593Smuzhiyun 			net2272_ep_read(ep, EP_STAT1),
1254*4882a593Smuzhiyun 			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1255*4882a593Smuzhiyun 			t1 & 0x0f,
1256*4882a593Smuzhiyun 			ep->is_in ? "in" : "out",
1257*4882a593Smuzhiyun 			type_string(t1 >> 5),
1258*4882a593Smuzhiyun 			ep->stopped ? "*" : "");
1259*4882a593Smuzhiyun 		size -= t;
1260*4882a593Smuzhiyun 		next += t;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 		t = scnprintf(next, size,
1263*4882a593Smuzhiyun 			"\tep_transfer %06x\n",
1264*4882a593Smuzhiyun 			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1265*4882a593Smuzhiyun 			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1266*4882a593Smuzhiyun 			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1267*4882a593Smuzhiyun 		size -= t;
1268*4882a593Smuzhiyun 		next += t;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1271*4882a593Smuzhiyun 		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1272*4882a593Smuzhiyun 		t = scnprintf(next, size,
1273*4882a593Smuzhiyun 			"\tbuf-a %s buf-b %s\n",
1274*4882a593Smuzhiyun 			buf_state_string(t1),
1275*4882a593Smuzhiyun 			buf_state_string(t2));
1276*4882a593Smuzhiyun 		size -= t;
1277*4882a593Smuzhiyun 		next += t;
1278*4882a593Smuzhiyun 	}
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dev->lock, flags);
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 	return PAGE_SIZE - size;
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun static DEVICE_ATTR_RO(registers);
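
/*
 * Usage sketch (the exact sysfs path below is illustrative): once probe has
 * called device_create_file() for this attribute, the dump produced by
 * registers_show() can be read from user space, e.g.
 *
 *	cat /sys/devices/.../registers
 */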
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun static void
1289*4882a593Smuzhiyun net2272_set_fifo_mode(struct net2272 *dev, int mode)
1290*4882a593Smuzhiyun {
1291*4882a593Smuzhiyun 	u8 tmp;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1294*4882a593Smuzhiyun 	tmp |= (mode << 6);
1295*4882a593Smuzhiyun 	net2272_write(dev, LOCCTL, tmp);
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 	INIT_LIST_HEAD(&dev->gadget.ep_list);
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	/* always ep-a, ep-c ... maybe not ep-b */
1300*4882a593Smuzhiyun 	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	switch (mode) {
1303*4882a593Smuzhiyun 	case 0:
1304*4882a593Smuzhiyun 		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1305*4882a593Smuzhiyun 		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1306*4882a593Smuzhiyun 		break;
1307*4882a593Smuzhiyun 	case 1:
1308*4882a593Smuzhiyun 		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1309*4882a593Smuzhiyun 		dev->ep[1].fifo_size = 1024;
1310*4882a593Smuzhiyun 		dev->ep[2].fifo_size = 512;
1311*4882a593Smuzhiyun 		break;
1312*4882a593Smuzhiyun 	case 2:
1313*4882a593Smuzhiyun 		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1314*4882a593Smuzhiyun 		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1315*4882a593Smuzhiyun 		break;
1316*4882a593Smuzhiyun 	case 3:
1317*4882a593Smuzhiyun 		dev->ep[1].fifo_size = 1024;
1318*4882a593Smuzhiyun 		break;
1319*4882a593Smuzhiyun 	}
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	/* ep-c is always 2 512 byte buffers */
1322*4882a593Smuzhiyun 	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1323*4882a593Smuzhiyun 	dev->ep[3].fifo_size = 512;
1324*4882a593Smuzhiyun }
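
/*
 * Summary of the fifo_mode mapping implemented above (ep-c is always
 * 2 x 512 byte buffers):
 *
 *	mode 0: ep-a 512,  ep-b 512
 *	mode 1: ep-a 1024, ep-b 512
 *	mode 2: ep-a 1024, ep-b 1024
 *	mode 3: ep-a 1024, ep-b not available
 */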
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun static void
1329*4882a593Smuzhiyun net2272_usb_reset(struct net2272 *dev)
1330*4882a593Smuzhiyun {
1331*4882a593Smuzhiyun 	dev->gadget.speed = USB_SPEED_UNKNOWN;
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	net2272_cancel_dma(dev);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	net2272_write(dev, IRQENB0, 0);
1336*4882a593Smuzhiyun 	net2272_write(dev, IRQENB1, 0);
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	/* clear irq state */
1339*4882a593Smuzhiyun 	net2272_write(dev, IRQSTAT0, 0xff);
1340*4882a593Smuzhiyun 	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	net2272_write(dev, DMAREQ,
1343*4882a593Smuzhiyun 		(0 << DMA_BUFFER_VALID) |
1344*4882a593Smuzhiyun 		(0 << DMA_REQUEST_ENABLE) |
1345*4882a593Smuzhiyun 		(1 << DMA_CONTROL_DACK) |
1346*4882a593Smuzhiyun 		(dev->dma_eot_polarity << EOT_POLARITY) |
1347*4882a593Smuzhiyun 		(dev->dma_dack_polarity << DACK_POLARITY) |
1348*4882a593Smuzhiyun 		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1349*4882a593Smuzhiyun 		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 	net2272_cancel_dma(dev);
1352*4882a593Smuzhiyun 	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	/* Set the NET2272 ep fifo data width to 16-bit mode.  For correct byte swapping,
1355*4882a593Smuzhiyun 	 * note that the higher-level gadget drivers are expected to convert data to little endian.
1356*4882a593Smuzhiyun 	 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
1357*4882a593Smuzhiyun 	 */
1358*4882a593Smuzhiyun 	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
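	/*
	 * Illustrative example (an assumption, not enabled in this driver): a
	 * platform that does need the byte-lane swap described above would set
	 * BYTE_SWAP as well, e.g.
	 *
	 *	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) |
	 *			(1 << DATA_WIDTH) | (1 << BYTE_SWAP));
	 */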
1359*4882a593Smuzhiyun 	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1360*4882a593Smuzhiyun }
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun static void
1363*4882a593Smuzhiyun net2272_usb_reinit(struct net2272 *dev)
1364*4882a593Smuzhiyun {
1365*4882a593Smuzhiyun 	int i;
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	/* basic endpoint init */
1368*4882a593Smuzhiyun 	for (i = 0; i < 4; ++i) {
1369*4882a593Smuzhiyun 		struct net2272_ep *ep = &dev->ep[i];
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 		ep->ep.name = ep_name[i];
1372*4882a593Smuzhiyun 		ep->dev = dev;
1373*4882a593Smuzhiyun 		ep->num = i;
1374*4882a593Smuzhiyun 		ep->not_empty = 0;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 		if (use_dma && ep->num == dma_ep)
1377*4882a593Smuzhiyun 			ep->dma = 1;
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 		if (i > 0 && i <= 3)
1380*4882a593Smuzhiyun 			ep->fifo_size = 512;
1381*4882a593Smuzhiyun 		else
1382*4882a593Smuzhiyun 			ep->fifo_size = 64;
1383*4882a593Smuzhiyun 		net2272_ep_reset(ep);
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 		if (i == 0) {
1386*4882a593Smuzhiyun 			ep->ep.caps.type_control = true;
1387*4882a593Smuzhiyun 		} else {
1388*4882a593Smuzhiyun 			ep->ep.caps.type_iso = true;
1389*4882a593Smuzhiyun 			ep->ep.caps.type_bulk = true;
1390*4882a593Smuzhiyun 			ep->ep.caps.type_int = true;
1391*4882a593Smuzhiyun 		}
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 		ep->ep.caps.dir_in = true;
1394*4882a593Smuzhiyun 		ep->ep.caps.dir_out = true;
1395*4882a593Smuzhiyun 	}
1396*4882a593Smuzhiyun 	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	dev->gadget.ep0 = &dev->ep[0].ep;
1399*4882a593Smuzhiyun 	dev->ep[0].stopped = 0;
1400*4882a593Smuzhiyun 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1401*4882a593Smuzhiyun }
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun static void
1404*4882a593Smuzhiyun net2272_ep0_start(struct net2272 *dev)
1405*4882a593Smuzhiyun {
1406*4882a593Smuzhiyun 	struct net2272_ep *ep0 = &dev->ep[0];
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	net2272_ep_write(ep0, EP_RSPSET,
1409*4882a593Smuzhiyun 		(1 << NAK_OUT_PACKETS_MODE) |
1410*4882a593Smuzhiyun 		(1 << ALT_NAK_OUT_PACKETS));
1411*4882a593Smuzhiyun 	net2272_ep_write(ep0, EP_RSPCLR,
1412*4882a593Smuzhiyun 		(1 << HIDE_STATUS_PHASE) |
1413*4882a593Smuzhiyun 		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1414*4882a593Smuzhiyun 	net2272_write(dev, USBCTL0,
1415*4882a593Smuzhiyun 		(dev->softconnect << USB_DETECT_ENABLE) |
1416*4882a593Smuzhiyun 		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1417*4882a593Smuzhiyun 		(1 << IO_WAKEUP_ENABLE));
1418*4882a593Smuzhiyun 	net2272_write(dev, IRQENB0,
1419*4882a593Smuzhiyun 		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1420*4882a593Smuzhiyun 		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1421*4882a593Smuzhiyun 		(1 << DMA_DONE_INTERRUPT_ENABLE));
1422*4882a593Smuzhiyun 	net2272_write(dev, IRQENB1,
1423*4882a593Smuzhiyun 		(1 << VBUS_INTERRUPT_ENABLE) |
1424*4882a593Smuzhiyun 		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1425*4882a593Smuzhiyun 		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1426*4882a593Smuzhiyun }
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun /* when a driver is successfully registered, it will receive
1429*4882a593Smuzhiyun  * control requests including set_configuration(), which enables
1430*4882a593Smuzhiyun  * non-control requests.  then usb traffic follows until a
1431*4882a593Smuzhiyun  * disconnect is reported.  then a host may connect again, or
1432*4882a593Smuzhiyun  * the driver might get unbound.
1433*4882a593Smuzhiyun  */
1434*4882a593Smuzhiyun static int net2272_start(struct usb_gadget *_gadget,
1435*4882a593Smuzhiyun 		struct usb_gadget_driver *driver)
1436*4882a593Smuzhiyun {
1437*4882a593Smuzhiyun 	struct net2272 *dev;
1438*4882a593Smuzhiyun 	unsigned i;
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	if (!driver || !driver->setup ||
1441*4882a593Smuzhiyun 	    driver->max_speed != USB_SPEED_HIGH)
1442*4882a593Smuzhiyun 		return -EINVAL;
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	dev = container_of(_gadget, struct net2272, gadget);
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	for (i = 0; i < 4; ++i)
1447*4882a593Smuzhiyun 		dev->ep[i].irqs = 0;
1448*4882a593Smuzhiyun 	/* hook up the driver ... */
1449*4882a593Smuzhiyun 	dev->softconnect = 1;
1450*4882a593Smuzhiyun 	driver->driver.bus = NULL;
1451*4882a593Smuzhiyun 	dev->driver = driver;
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 	/* ... then enable host detection and ep0; and we're ready
1454*4882a593Smuzhiyun 	 * for set_configuration as well as eventual disconnect.
1455*4882a593Smuzhiyun 	 */
1456*4882a593Smuzhiyun 	net2272_ep0_start(dev);
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	return 0;
1459*4882a593Smuzhiyun }
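
/*
 * Illustrative sketch (hypothetical names, not part of this driver): the
 * minimal usb_gadget_driver fields that net2272_start() insists on -- a
 * setup() callback and max_speed == USB_SPEED_HIGH.  Real gadget drivers
 * are normally built on the composite framework rather than open-coded
 * like this.
 */
static int example_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	/* a real driver decodes ctrl here and queues ep0 requests */
	return -EOPNOTSUPP;
}

static struct usb_gadget_driver example_gadget_driver = {
	.function	= "example",
	.max_speed	= USB_SPEED_HIGH,
	.setup		= example_setup,
};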
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun static void
1462*4882a593Smuzhiyun stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1463*4882a593Smuzhiyun {
1464*4882a593Smuzhiyun 	int i;
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	/* don't disconnect if it's not connected */
1467*4882a593Smuzhiyun 	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1468*4882a593Smuzhiyun 		driver = NULL;
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 	/* stop hardware; prevent new request submissions;
1471*4882a593Smuzhiyun 	 * and kill any outstanding requests.
1472*4882a593Smuzhiyun 	 */
1473*4882a593Smuzhiyun 	net2272_usb_reset(dev);
1474*4882a593Smuzhiyun 	for (i = 0; i < 4; ++i)
1475*4882a593Smuzhiyun 		net2272_dequeue_all(&dev->ep[i]);
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	/* report disconnect; the driver is already quiesced */
1478*4882a593Smuzhiyun 	if (driver) {
1479*4882a593Smuzhiyun 		spin_unlock(&dev->lock);
1480*4882a593Smuzhiyun 		driver->disconnect(&dev->gadget);
1481*4882a593Smuzhiyun 		spin_lock(&dev->lock);
1482*4882a593Smuzhiyun 	}
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	net2272_usb_reinit(dev);
1485*4882a593Smuzhiyun }
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun static int net2272_stop(struct usb_gadget *_gadget)
1488*4882a593Smuzhiyun {
1489*4882a593Smuzhiyun 	struct net2272 *dev;
1490*4882a593Smuzhiyun 	unsigned long flags;
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	dev = container_of(_gadget, struct net2272, gadget);
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	spin_lock_irqsave(&dev->lock, flags);
1495*4882a593Smuzhiyun 	stop_activity(dev, NULL);
1496*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dev->lock, flags);
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	dev->driver = NULL;
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	return 0;
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
1504*4882a593Smuzhiyun /* handle ep-a/ep-b dma completions */
1505*4882a593Smuzhiyun static void
1506*4882a593Smuzhiyun net2272_handle_dma(struct net2272_ep *ep)
1507*4882a593Smuzhiyun {
1508*4882a593Smuzhiyun 	struct net2272_request *req;
1509*4882a593Smuzhiyun 	unsigned len;
1510*4882a593Smuzhiyun 	int status;
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	if (!list_empty(&ep->queue))
1513*4882a593Smuzhiyun 		req = list_entry(ep->queue.next,
1514*4882a593Smuzhiyun 				struct net2272_request, queue);
1515*4882a593Smuzhiyun 	else
1516*4882a593Smuzhiyun 		req = NULL;
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	/* Ensure DREQ is de-asserted */
1521*4882a593Smuzhiyun 	net2272_write(ep->dev, DMAREQ,
1522*4882a593Smuzhiyun 		(0 << DMA_BUFFER_VALID)
1523*4882a593Smuzhiyun 	      | (0 << DMA_REQUEST_ENABLE)
1524*4882a593Smuzhiyun 	      | (1 << DMA_CONTROL_DACK)
1525*4882a593Smuzhiyun 	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1526*4882a593Smuzhiyun 	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1527*4882a593Smuzhiyun 	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1528*4882a593Smuzhiyun 	      | (ep->dma << DMA_ENDPOINT_SELECT));
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 	ep->dev->dma_busy = 0;
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	net2272_ep_write(ep, EP_IRQENB,
1533*4882a593Smuzhiyun 		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1534*4882a593Smuzhiyun 		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1535*4882a593Smuzhiyun 		| net2272_ep_read(ep, EP_IRQENB));
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	/* device-to-host transfer completed */
1538*4882a593Smuzhiyun 	if (ep->is_in) {
1539*4882a593Smuzhiyun 		/* validate a short packet or zlp if necessary */
1540*4882a593Smuzhiyun 		if ((req->req.length % ep->ep.maxpacket != 0) ||
1541*4882a593Smuzhiyun 				req->req.zero)
1542*4882a593Smuzhiyun 			set_fifo_bytecount(ep, 0);
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 		net2272_done(ep, req, 0);
1545*4882a593Smuzhiyun 		if (!list_empty(&ep->queue)) {
1546*4882a593Smuzhiyun 			req = list_entry(ep->queue.next,
1547*4882a593Smuzhiyun 					struct net2272_request, queue);
1548*4882a593Smuzhiyun 			status = net2272_kick_dma(ep, req);
1549*4882a593Smuzhiyun 			if (status < 0)
1550*4882a593Smuzhiyun 				net2272_pio_advance(ep);
1551*4882a593Smuzhiyun 		}
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	/* host-to-device transfer completed */
1554*4882a593Smuzhiyun 	} else {
1555*4882a593Smuzhiyun 		/* terminated with a short packet? */
1556*4882a593Smuzhiyun 		if (net2272_read(ep->dev, IRQSTAT0) &
1557*4882a593Smuzhiyun 				(1 << DMA_DONE_INTERRUPT)) {
1558*4882a593Smuzhiyun 			/* abort system dma */
1559*4882a593Smuzhiyun 			net2272_cancel_dma(ep->dev);
1560*4882a593Smuzhiyun 		}
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 		/* EP_TRANSFER will contain the number of bytes
1563*4882a593Smuzhiyun 		 * actually received.
1564*4882a593Smuzhiyun 		 * NOTE: There is no overflow detection on EP_TRANSFER:
1565*4882a593Smuzhiyun 		 * We can't deal with transfers larger than 2^24 bytes!
1566*4882a593Smuzhiyun 		 */
1567*4882a593Smuzhiyun 		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1568*4882a593Smuzhiyun 			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1569*4882a593Smuzhiyun 			| (net2272_ep_read(ep, EP_TRANSFER0));
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 		if (ep->not_empty)
1572*4882a593Smuzhiyun 			len += 4;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 		req->req.actual += len;
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 		/* get any remaining data */
1577*4882a593Smuzhiyun 		net2272_pio_advance(ep);
1578*4882a593Smuzhiyun 	}
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun static void
1584*4882a593Smuzhiyun net2272_handle_ep(struct net2272_ep *ep)
1585*4882a593Smuzhiyun {
1586*4882a593Smuzhiyun 	struct net2272_request *req;
1587*4882a593Smuzhiyun 	u8 stat0, stat1;
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	if (!list_empty(&ep->queue))
1590*4882a593Smuzhiyun 		req = list_entry(ep->queue.next,
1591*4882a593Smuzhiyun 			struct net2272_request, queue);
1592*4882a593Smuzhiyun 	else
1593*4882a593Smuzhiyun 		req = NULL;
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	/* ack all, and handle what we care about */
1596*4882a593Smuzhiyun 	stat0 = net2272_ep_read(ep, EP_STAT0);
1597*4882a593Smuzhiyun 	stat1 = net2272_ep_read(ep, EP_STAT1);
1598*4882a593Smuzhiyun 	ep->irqs++;
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1601*4882a593Smuzhiyun 		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 	net2272_ep_write(ep, EP_STAT0, stat0 &
1604*4882a593Smuzhiyun 		~((1 << NAK_OUT_PACKETS)
1605*4882a593Smuzhiyun 		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1606*4882a593Smuzhiyun 	net2272_ep_write(ep, EP_STAT1, stat1);
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 	/* data packet(s) received (in the fifo, OUT)
1609*4882a593Smuzhiyun 	 * direction must be validated, otherwise control read status phase
1610*4882a593Smuzhiyun 	 * could be interpreted as a valid packet
1611*4882a593Smuzhiyun 	 */
1612*4882a593Smuzhiyun 	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1613*4882a593Smuzhiyun 		net2272_pio_advance(ep);
1614*4882a593Smuzhiyun 	/* data packet(s) transmitted (IN) */
1615*4882a593Smuzhiyun 	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1616*4882a593Smuzhiyun 		net2272_pio_advance(ep);
1617*4882a593Smuzhiyun }
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun static struct net2272_ep *
1620*4882a593Smuzhiyun net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1621*4882a593Smuzhiyun {
1622*4882a593Smuzhiyun 	struct net2272_ep *ep;
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1625*4882a593Smuzhiyun 		return &dev->ep[0];
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1628*4882a593Smuzhiyun 		u8 bEndpointAddress;
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 		if (!ep->desc)
1631*4882a593Smuzhiyun 			continue;
1632*4882a593Smuzhiyun 		bEndpointAddress = ep->desc->bEndpointAddress;
1633*4882a593Smuzhiyun 		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1634*4882a593Smuzhiyun 			continue;
1635*4882a593Smuzhiyun 		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1636*4882a593Smuzhiyun 			return ep;
1637*4882a593Smuzhiyun 	}
1638*4882a593Smuzhiyun 	return NULL;
1639*4882a593Smuzhiyun }
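
/*
 * Example: a request carrying wIndex 0x81 matches the endpoint whose
 * descriptor has bEndpointAddress 0x81 (IN, number 1); wIndex 0x01 matches
 * the OUT endpoint with the same number, and wIndex 0 always maps to ep0.
 */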
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun /*
1642*4882a593Smuzhiyun  * USB Test Packet:
1643*4882a593Smuzhiyun  * JKJKJKJK * 9
1644*4882a593Smuzhiyun  * JJKKJJKK * 8
1645*4882a593Smuzhiyun  * JJJJKKKK * 8
1646*4882a593Smuzhiyun  * JJJJJJJKKKKKKK * 8
1647*4882a593Smuzhiyun  * JJJJJJJK * 8
1648*4882a593Smuzhiyun  * {JKKKKKKK * 10}, JK
1649*4882a593Smuzhiyun  */
1650*4882a593Smuzhiyun static const u8 net2272_test_packet[] = {
1651*4882a593Smuzhiyun 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1652*4882a593Smuzhiyun 	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1653*4882a593Smuzhiyun 	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1654*4882a593Smuzhiyun 	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1655*4882a593Smuzhiyun 	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1656*4882a593Smuzhiyun 	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1657*4882a593Smuzhiyun };
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun static void
1660*4882a593Smuzhiyun net2272_set_test_mode(struct net2272 *dev, int mode)
1661*4882a593Smuzhiyun {
1662*4882a593Smuzhiyun 	int i;
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 	/* Disable all net2272 interrupts:
1665*4882a593Smuzhiyun 	 * Nothing but a power cycle should stop the test.
1666*4882a593Smuzhiyun 	 */
1667*4882a593Smuzhiyun 	net2272_write(dev, IRQENB0, 0x00);
1668*4882a593Smuzhiyun 	net2272_write(dev, IRQENB1, 0x00);
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	/* Force transceiver to high-speed */
1671*4882a593Smuzhiyun 	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	net2272_write(dev, PAGESEL, 0);
1674*4882a593Smuzhiyun 	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1675*4882a593Smuzhiyun 	net2272_write(dev, EP_RSPCLR,
1676*4882a593Smuzhiyun 			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1677*4882a593Smuzhiyun 			| (1 << HIDE_STATUS_PHASE));
1678*4882a593Smuzhiyun 	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1679*4882a593Smuzhiyun 	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	/* wait for status phase to complete */
1682*4882a593Smuzhiyun 	while (!(net2272_read(dev, EP_STAT0) &
1683*4882a593Smuzhiyun 				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1684*4882a593Smuzhiyun 		;
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun 	/* Enable test mode */
1687*4882a593Smuzhiyun 	net2272_write(dev, USBTEST, mode);
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 	/* load test packet */
1690*4882a593Smuzhiyun 	if (mode == USB_TEST_PACKET) {
1691*4882a593Smuzhiyun 		/* switch to 8 bit mode */
1692*4882a593Smuzhiyun 		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1693*4882a593Smuzhiyun 				~(1 << DATA_WIDTH));
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1696*4882a593Smuzhiyun 			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 		/* Validate test packet */
1699*4882a593Smuzhiyun 		net2272_write(dev, EP_TRANSFER0, 0);
1700*4882a593Smuzhiyun 	}
1701*4882a593Smuzhiyun }
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun static void
1704*4882a593Smuzhiyun net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1705*4882a593Smuzhiyun {
1706*4882a593Smuzhiyun 	struct net2272_ep *ep;
1707*4882a593Smuzhiyun 	u8 num, scratch;
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	/* starting a control request? */
1710*4882a593Smuzhiyun 	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1711*4882a593Smuzhiyun 		union {
1712*4882a593Smuzhiyun 			u8 raw[8];
1713*4882a593Smuzhiyun 			struct usb_ctrlrequest	r;
1714*4882a593Smuzhiyun 		} u;
1715*4882a593Smuzhiyun 		int tmp = 0;
1716*4882a593Smuzhiyun 		struct net2272_request *req;
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1719*4882a593Smuzhiyun 			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1720*4882a593Smuzhiyun 				dev->gadget.speed = USB_SPEED_HIGH;
1721*4882a593Smuzhiyun 			else
1722*4882a593Smuzhiyun 				dev->gadget.speed = USB_SPEED_FULL;
1723*4882a593Smuzhiyun 			dev_dbg(dev->dev, "%s\n",
1724*4882a593Smuzhiyun 				usb_speed_string(dev->gadget.speed));
1725*4882a593Smuzhiyun 		}
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun 		ep = &dev->ep[0];
1728*4882a593Smuzhiyun 		ep->irqs++;
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 		/* make sure any leftover interrupt state is cleared */
1731*4882a593Smuzhiyun 		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1732*4882a593Smuzhiyun 		while (!list_empty(&ep->queue)) {
1733*4882a593Smuzhiyun 			req = list_entry(ep->queue.next,
1734*4882a593Smuzhiyun 				struct net2272_request, queue);
1735*4882a593Smuzhiyun 			net2272_done(ep, req,
1736*4882a593Smuzhiyun 				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1737*4882a593Smuzhiyun 		}
1738*4882a593Smuzhiyun 		ep->stopped = 0;
1739*4882a593Smuzhiyun 		dev->protocol_stall = 0;
1740*4882a593Smuzhiyun 		net2272_ep_write(ep, EP_STAT0,
1741*4882a593Smuzhiyun 			    (1 << DATA_IN_TOKEN_INTERRUPT)
1742*4882a593Smuzhiyun 			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1743*4882a593Smuzhiyun 			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1744*4882a593Smuzhiyun 			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1745*4882a593Smuzhiyun 			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1746*4882a593Smuzhiyun 		net2272_ep_write(ep, EP_STAT1,
1747*4882a593Smuzhiyun 			    (1 << TIMEOUT)
1748*4882a593Smuzhiyun 			  | (1 << USB_OUT_ACK_SENT)
1749*4882a593Smuzhiyun 			  | (1 << USB_OUT_NAK_SENT)
1750*4882a593Smuzhiyun 			  | (1 << USB_IN_ACK_RCVD)
1751*4882a593Smuzhiyun 			  | (1 << USB_IN_NAK_SENT)
1752*4882a593Smuzhiyun 			  | (1 << USB_STALL_SENT)
1753*4882a593Smuzhiyun 			  | (1 << LOCAL_OUT_ZLP));
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 		/*
1756*4882a593Smuzhiyun 		 * Ensure Control Read pre-validation setting is beyond maximum size
1757*4882a593Smuzhiyun 		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1758*4882a593Smuzhiyun 		 *    an EP0 transfer following the Control Write is a Control Read,
1759*4882a593Smuzhiyun 		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1760*4882a593Smuzhiyun 		 *    pre-validation count.
1761*4882a593Smuzhiyun 		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1762*4882a593Smuzhiyun 		 *    the pre-validation count cannot cause an unexpected validation
1763*4882a593Smuzhiyun 		 */
1764*4882a593Smuzhiyun 		net2272_write(dev, PAGESEL, 0);
1765*4882a593Smuzhiyun 		net2272_write(dev, EP_TRANSFER2, 0xff);
1766*4882a593Smuzhiyun 		net2272_write(dev, EP_TRANSFER1, 0xff);
1767*4882a593Smuzhiyun 		net2272_write(dev, EP_TRANSFER0, 0xff);
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 		u.raw[0] = net2272_read(dev, SETUP0);
1770*4882a593Smuzhiyun 		u.raw[1] = net2272_read(dev, SETUP1);
1771*4882a593Smuzhiyun 		u.raw[2] = net2272_read(dev, SETUP2);
1772*4882a593Smuzhiyun 		u.raw[3] = net2272_read(dev, SETUP3);
1773*4882a593Smuzhiyun 		u.raw[4] = net2272_read(dev, SETUP4);
1774*4882a593Smuzhiyun 		u.raw[5] = net2272_read(dev, SETUP5);
1775*4882a593Smuzhiyun 		u.raw[6] = net2272_read(dev, SETUP6);
1776*4882a593Smuzhiyun 		u.raw[7] = net2272_read(dev, SETUP7);
1777*4882a593Smuzhiyun 		/*
1778*4882a593Smuzhiyun 		 * If you have a big endian cpu make sure le16_to_cpus
1779*4882a593Smuzhiyun 		 * performs the proper byte swapping here...
1780*4882a593Smuzhiyun 		 */
1781*4882a593Smuzhiyun 		le16_to_cpus(&u.r.wValue);
1782*4882a593Smuzhiyun 		le16_to_cpus(&u.r.wIndex);
1783*4882a593Smuzhiyun 		le16_to_cpus(&u.r.wLength);
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 		/* ack the irq */
1786*4882a593Smuzhiyun 		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1787*4882a593Smuzhiyun 		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 		/* watch control traffic at the token level, and force
1790*4882a593Smuzhiyun 		 * synchronization before letting the status phase happen.
1791*4882a593Smuzhiyun 		 */
1792*4882a593Smuzhiyun 		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1793*4882a593Smuzhiyun 		if (ep->is_in) {
1794*4882a593Smuzhiyun 			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1795*4882a593Smuzhiyun 				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1796*4882a593Smuzhiyun 				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1797*4882a593Smuzhiyun 			stop_out_naking(ep);
1798*4882a593Smuzhiyun 		} else
1799*4882a593Smuzhiyun 			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1800*4882a593Smuzhiyun 				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1801*4882a593Smuzhiyun 				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1802*4882a593Smuzhiyun 		net2272_ep_write(ep, EP_IRQENB, scratch);
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun 		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1805*4882a593Smuzhiyun 			goto delegate;
1806*4882a593Smuzhiyun 		switch (u.r.bRequest) {
1807*4882a593Smuzhiyun 		case USB_REQ_GET_STATUS: {
1808*4882a593Smuzhiyun 			struct net2272_ep *e;
1809*4882a593Smuzhiyun 			u16 status = 0;
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 			switch (u.r.bRequestType & USB_RECIP_MASK) {
1812*4882a593Smuzhiyun 			case USB_RECIP_ENDPOINT:
1813*4882a593Smuzhiyun 				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1814*4882a593Smuzhiyun 				if (!e || u.r.wLength > 2)
1815*4882a593Smuzhiyun 					goto do_stall;
1816*4882a593Smuzhiyun 				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1817*4882a593Smuzhiyun 					status = cpu_to_le16(1);
1818*4882a593Smuzhiyun 				else
1819*4882a593Smuzhiyun 					status = cpu_to_le16(0);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 				/* don't bother with a request object! */
1822*4882a593Smuzhiyun 				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1823*4882a593Smuzhiyun 				writew(status, net2272_reg_addr(dev, EP_DATA));
1824*4882a593Smuzhiyun 				set_fifo_bytecount(&dev->ep[0], 0);
1825*4882a593Smuzhiyun 				allow_status(ep);
1826*4882a593Smuzhiyun 				dev_vdbg(dev->dev, "%s stat %02x\n",
1827*4882a593Smuzhiyun 					ep->ep.name, status);
1828*4882a593Smuzhiyun 				goto next_endpoints;
1829*4882a593Smuzhiyun 			case USB_RECIP_DEVICE:
1830*4882a593Smuzhiyun 				if (u.r.wLength > 2)
1831*4882a593Smuzhiyun 					goto do_stall;
1832*4882a593Smuzhiyun 				if (dev->gadget.is_selfpowered)
1833*4882a593Smuzhiyun 					status = (1 << USB_DEVICE_SELF_POWERED);
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 				/* don't bother with a request object! */
1836*4882a593Smuzhiyun 				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1837*4882a593Smuzhiyun 				writew(status, net2272_reg_addr(dev, EP_DATA));
1838*4882a593Smuzhiyun 				set_fifo_bytecount(&dev->ep[0], 0);
1839*4882a593Smuzhiyun 				allow_status(ep);
1840*4882a593Smuzhiyun 				dev_vdbg(dev->dev, "device stat %02x\n", status);
1841*4882a593Smuzhiyun 				goto next_endpoints;
1842*4882a593Smuzhiyun 			case USB_RECIP_INTERFACE:
1843*4882a593Smuzhiyun 				if (u.r.wLength > 2)
1844*4882a593Smuzhiyun 					goto do_stall;
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 				/* don't bother with a request object! */
1847*4882a593Smuzhiyun 				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1848*4882a593Smuzhiyun 				writew(status, net2272_reg_addr(dev, EP_DATA));
1849*4882a593Smuzhiyun 				set_fifo_bytecount(&dev->ep[0], 0);
1850*4882a593Smuzhiyun 				allow_status(ep);
1851*4882a593Smuzhiyun 				dev_vdbg(dev->dev, "interface status %02x\n", status);
1852*4882a593Smuzhiyun 				goto next_endpoints;
1853*4882a593Smuzhiyun 			}
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 			break;
1856*4882a593Smuzhiyun 		}
1857*4882a593Smuzhiyun 		case USB_REQ_CLEAR_FEATURE: {
1858*4882a593Smuzhiyun 			struct net2272_ep *e;
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1861*4882a593Smuzhiyun 				goto delegate;
1862*4882a593Smuzhiyun 			if (u.r.wValue != USB_ENDPOINT_HALT ||
1863*4882a593Smuzhiyun 			    u.r.wLength != 0)
1864*4882a593Smuzhiyun 				goto do_stall;
1865*4882a593Smuzhiyun 			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1866*4882a593Smuzhiyun 			if (!e)
1867*4882a593Smuzhiyun 				goto do_stall;
1868*4882a593Smuzhiyun 			if (e->wedged) {
1869*4882a593Smuzhiyun 				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1870*4882a593Smuzhiyun 					ep->ep.name);
1871*4882a593Smuzhiyun 			} else {
1872*4882a593Smuzhiyun 				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1873*4882a593Smuzhiyun 				clear_halt(e);
1874*4882a593Smuzhiyun 			}
1875*4882a593Smuzhiyun 			allow_status(ep);
1876*4882a593Smuzhiyun 			goto next_endpoints;
1877*4882a593Smuzhiyun 		}
1878*4882a593Smuzhiyun 		case USB_REQ_SET_FEATURE: {
1879*4882a593Smuzhiyun 			struct net2272_ep *e;
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1882*4882a593Smuzhiyun 				if (u.r.wIndex != NORMAL_OPERATION)
1883*4882a593Smuzhiyun 					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1884*4882a593Smuzhiyun 				allow_status(ep);
1885*4882a593Smuzhiyun 				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1886*4882a593Smuzhiyun 				goto next_endpoints;
1887*4882a593Smuzhiyun 			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1888*4882a593Smuzhiyun 				goto delegate;
1889*4882a593Smuzhiyun 			if (u.r.wValue != USB_ENDPOINT_HALT ||
1890*4882a593Smuzhiyun 			    u.r.wLength != 0)
1891*4882a593Smuzhiyun 				goto do_stall;
1892*4882a593Smuzhiyun 			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1893*4882a593Smuzhiyun 			if (!e)
1894*4882a593Smuzhiyun 				goto do_stall;
1895*4882a593Smuzhiyun 			set_halt(e);
1896*4882a593Smuzhiyun 			allow_status(ep);
1897*4882a593Smuzhiyun 			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1898*4882a593Smuzhiyun 			goto next_endpoints;
1899*4882a593Smuzhiyun 		}
1900*4882a593Smuzhiyun 		case USB_REQ_SET_ADDRESS: {
1901*4882a593Smuzhiyun 			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1902*4882a593Smuzhiyun 			allow_status(ep);
1903*4882a593Smuzhiyun 			break;
1904*4882a593Smuzhiyun 		}
1905*4882a593Smuzhiyun 		default:
1906*4882a593Smuzhiyun  delegate:
1907*4882a593Smuzhiyun 			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1908*4882a593Smuzhiyun 				"ep_cfg %08x\n",
1909*4882a593Smuzhiyun 				u.r.bRequestType, u.r.bRequest,
1910*4882a593Smuzhiyun 				u.r.wValue, u.r.wIndex,
1911*4882a593Smuzhiyun 				net2272_ep_read(ep, EP_CFG));
1912*4882a593Smuzhiyun 			spin_unlock(&dev->lock);
1913*4882a593Smuzhiyun 			tmp = dev->driver->setup(&dev->gadget, &u.r);
1914*4882a593Smuzhiyun 			spin_lock(&dev->lock);
1915*4882a593Smuzhiyun 		}
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 		/* stall ep0 on error */
1918*4882a593Smuzhiyun 		if (tmp < 0) {
1919*4882a593Smuzhiyun  do_stall:
1920*4882a593Smuzhiyun 			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1921*4882a593Smuzhiyun 				u.r.bRequestType, u.r.bRequest, tmp);
1922*4882a593Smuzhiyun 			dev->protocol_stall = 1;
1923*4882a593Smuzhiyun 		}
1924*4882a593Smuzhiyun 	/* endpoint dma irq? */
1925*4882a593Smuzhiyun 	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1926*4882a593Smuzhiyun 		net2272_cancel_dma(dev);
1927*4882a593Smuzhiyun 		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1928*4882a593Smuzhiyun 		stat &= ~(1 << DMA_DONE_INTERRUPT);
1929*4882a593Smuzhiyun 		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1930*4882a593Smuzhiyun 			? 2 : 1;
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun 		ep = &dev->ep[num];
1933*4882a593Smuzhiyun 		net2272_handle_dma(ep);
1934*4882a593Smuzhiyun 	}
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun  next_endpoints:
1937*4882a593Smuzhiyun 	/* endpoint data irq? */
1938*4882a593Smuzhiyun 	scratch = stat & 0x0f;
1939*4882a593Smuzhiyun 	stat &= ~0x0f;
1940*4882a593Smuzhiyun 	for (num = 0; scratch; num++) {
1941*4882a593Smuzhiyun 		u8 t;
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun 		/* does this endpoint's FIFO and queue need tending? */
1944*4882a593Smuzhiyun 		t = 1 << num;
1945*4882a593Smuzhiyun 		if ((scratch & t) == 0)
1946*4882a593Smuzhiyun 			continue;
1947*4882a593Smuzhiyun 		scratch ^= t;
1948*4882a593Smuzhiyun 
1949*4882a593Smuzhiyun 		ep = &dev->ep[num];
1950*4882a593Smuzhiyun 		net2272_handle_ep(ep);
1951*4882a593Smuzhiyun 	}
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	/* some interrupts we can just ignore */
1954*4882a593Smuzhiyun 	stat &= ~(1 << SOF_INTERRUPT);
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	if (stat)
1957*4882a593Smuzhiyun 		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1958*4882a593Smuzhiyun }
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun static void
1961*4882a593Smuzhiyun net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1962*4882a593Smuzhiyun {
1963*4882a593Smuzhiyun 	u8 tmp, mask;
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 	/* after disconnect there's nothing else to do! */
1966*4882a593Smuzhiyun 	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1967*4882a593Smuzhiyun 	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun 	if (stat & tmp) {
1970*4882a593Smuzhiyun 		bool	reset = false;
1971*4882a593Smuzhiyun 		bool	disconnect = false;
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun 		/*
1974*4882a593Smuzhiyun 		 * Ignore disconnects and resets if the speed hasn't been set.
1975*4882a593Smuzhiyun 		 * VBUS can bounce and there's always an initial reset.
1976*4882a593Smuzhiyun 		 */
1977*4882a593Smuzhiyun 		net2272_write(dev, IRQSTAT1, tmp);
1978*4882a593Smuzhiyun 		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1979*4882a593Smuzhiyun 			if ((stat & (1 << VBUS_INTERRUPT)) &&
1980*4882a593Smuzhiyun 					(net2272_read(dev, USBCTL1) &
1981*4882a593Smuzhiyun 						(1 << VBUS_PIN)) == 0) {
1982*4882a593Smuzhiyun 				disconnect = true;
1983*4882a593Smuzhiyun 				dev_dbg(dev->dev, "disconnect %s\n",
1984*4882a593Smuzhiyun 					dev->driver->driver.name);
1985*4882a593Smuzhiyun 			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
1986*4882a593Smuzhiyun 					(net2272_read(dev, USBCTL1) & mask)
1987*4882a593Smuzhiyun 						== 0) {
1988*4882a593Smuzhiyun 				reset = true;
1989*4882a593Smuzhiyun 				dev_dbg(dev->dev, "reset %s\n",
1990*4882a593Smuzhiyun 					dev->driver->driver.name);
1991*4882a593Smuzhiyun 			}
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 			if (disconnect || reset) {
1994*4882a593Smuzhiyun 				stop_activity(dev, dev->driver);
1995*4882a593Smuzhiyun 				net2272_ep0_start(dev);
1996*4882a593Smuzhiyun 				spin_unlock(&dev->lock);
1997*4882a593Smuzhiyun 				if (reset)
1998*4882a593Smuzhiyun 					usb_gadget_udc_reset
1999*4882a593Smuzhiyun 						(&dev->gadget, dev->driver);
2000*4882a593Smuzhiyun 				else
2001*4882a593Smuzhiyun 					(dev->driver->disconnect)
2002*4882a593Smuzhiyun 						(&dev->gadget);
2003*4882a593Smuzhiyun 				spin_lock(&dev->lock);
2004*4882a593Smuzhiyun 				return;
2005*4882a593Smuzhiyun 			}
2006*4882a593Smuzhiyun 		}
2007*4882a593Smuzhiyun 		stat &= ~tmp;
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 		if (!stat)
2010*4882a593Smuzhiyun 			return;
2011*4882a593Smuzhiyun 	}
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2014*4882a593Smuzhiyun 	if (stat & tmp) {
2015*4882a593Smuzhiyun 		net2272_write(dev, IRQSTAT1, tmp);
2016*4882a593Smuzhiyun 		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2017*4882a593Smuzhiyun 			if (dev->driver->suspend)
2018*4882a593Smuzhiyun 				dev->driver->suspend(&dev->gadget);
2019*4882a593Smuzhiyun 			if (!enable_suspend) {
2020*4882a593Smuzhiyun 				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2021*4882a593Smuzhiyun 				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2022*4882a593Smuzhiyun 			}
2023*4882a593Smuzhiyun 		} else {
2024*4882a593Smuzhiyun 			if (dev->driver->resume)
2025*4882a593Smuzhiyun 				dev->driver->resume(&dev->gadget);
2026*4882a593Smuzhiyun 		}
2027*4882a593Smuzhiyun 		stat &= ~tmp;
2028*4882a593Smuzhiyun 	}
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 	/* clear any other status/irqs */
2031*4882a593Smuzhiyun 	if (stat)
2032*4882a593Smuzhiyun 		net2272_write(dev, IRQSTAT1, stat);
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 	/* some status we can just ignore */
2035*4882a593Smuzhiyun 	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2036*4882a593Smuzhiyun 			| (1 << SUSPEND_REQUEST_INTERRUPT)
2037*4882a593Smuzhiyun 			| (1 << RESUME_INTERRUPT));
2038*4882a593Smuzhiyun 	if (!stat)
2039*4882a593Smuzhiyun 		return;
2040*4882a593Smuzhiyun 	else
2041*4882a593Smuzhiyun 		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2042*4882a593Smuzhiyun }
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun static irqreturn_t net2272_irq(int irq, void *_dev)
2045*4882a593Smuzhiyun {
2046*4882a593Smuzhiyun 	struct net2272 *dev = _dev;
2047*4882a593Smuzhiyun #if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2048*4882a593Smuzhiyun 	u32 intcsr;
2049*4882a593Smuzhiyun #endif
2050*4882a593Smuzhiyun #if defined(PLX_PCI_RDK)
2051*4882a593Smuzhiyun 	u8 dmareq;
2052*4882a593Smuzhiyun #endif
2053*4882a593Smuzhiyun 	spin_lock(&dev->lock);
2054*4882a593Smuzhiyun #if defined(PLX_PCI_RDK)
2055*4882a593Smuzhiyun 	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2058*4882a593Smuzhiyun 		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2059*4882a593Smuzhiyun 				dev->rdk1.plx9054_base_addr + INTCSR);
2060*4882a593Smuzhiyun 		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2061*4882a593Smuzhiyun 		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2062*4882a593Smuzhiyun 		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2063*4882a593Smuzhiyun 		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2064*4882a593Smuzhiyun 			dev->rdk1.plx9054_base_addr + INTCSR);
2065*4882a593Smuzhiyun 	}
2066*4882a593Smuzhiyun 	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2067*4882a593Smuzhiyun 		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2068*4882a593Smuzhiyun 				dev->rdk1.plx9054_base_addr + DMACSR0);
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 		dmareq = net2272_read(dev, DMAREQ);
2071*4882a593Smuzhiyun 		if (dmareq & 0x01)
2072*4882a593Smuzhiyun 			net2272_handle_dma(&dev->ep[2]);
2073*4882a593Smuzhiyun 		else
2074*4882a593Smuzhiyun 			net2272_handle_dma(&dev->ep[1]);
2075*4882a593Smuzhiyun 	}
2076*4882a593Smuzhiyun #endif
2077*4882a593Smuzhiyun #if defined(PLX_PCI_RDK2)
2078*4882a593Smuzhiyun 	/* see if PCI int for us by checking irqstat */
2079*4882a593Smuzhiyun 	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2080*4882a593Smuzhiyun 	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2081*4882a593Smuzhiyun 		spin_unlock(&dev->lock);
2082*4882a593Smuzhiyun 		return IRQ_NONE;
2083*4882a593Smuzhiyun 	}
2084*4882a593Smuzhiyun 	/* check dma interrupts */
2085*4882a593Smuzhiyun #endif
2086*4882a593Smuzhiyun 	/* Platform/device interrupt handler */
2087*4882a593Smuzhiyun #if !defined(PLX_PCI_RDK)
2088*4882a593Smuzhiyun 	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2089*4882a593Smuzhiyun 	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2090*4882a593Smuzhiyun #endif
2091*4882a593Smuzhiyun 	spin_unlock(&dev->lock);
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	return IRQ_HANDLED;
2094*4882a593Smuzhiyun }
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun static int net2272_present(struct net2272 *dev)
2097*4882a593Smuzhiyun {
2098*4882a593Smuzhiyun 	/*
2099*4882a593Smuzhiyun 	 * Quick test to see if the CPU can communicate properly with the NET2272.
2100*4882a593Smuzhiyun 	 * Verifies connection using writes and reads to write/read and
2101*4882a593Smuzhiyun 	 * read-only registers.
2102*4882a593Smuzhiyun 	 *
2103*4882a593Smuzhiyun 	 * This routine is strongly recommended, especially during early bring-up
2104*4882a593Smuzhiyun 	 * of new hardware; however, for designs that do not apply Power On System
2105*4882a593Smuzhiyun 	 * Tests (POST) it may be discarded (or perhaps minimized).
2106*4882a593Smuzhiyun 	 */
2107*4882a593Smuzhiyun 	unsigned int ii;
2108*4882a593Smuzhiyun 	u8 val, refval;
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 	/* Verify NET2272 write/read SCRATCH register can write and read */
2111*4882a593Smuzhiyun 	refval = net2272_read(dev, SCRATCH);
2112*4882a593Smuzhiyun 	for (ii = 0; ii < 0x100; ii += 7) {
2113*4882a593Smuzhiyun 		net2272_write(dev, SCRATCH, ii);
2114*4882a593Smuzhiyun 		val = net2272_read(dev, SCRATCH);
2115*4882a593Smuzhiyun 		if (val != ii) {
2116*4882a593Smuzhiyun 			dev_dbg(dev->dev,
2117*4882a593Smuzhiyun 				"%s: write/read SCRATCH register test failed: "
2118*4882a593Smuzhiyun 				"wrote:0x%2.2x, read:0x%2.2x\n",
2119*4882a593Smuzhiyun 				__func__, ii, val);
2120*4882a593Smuzhiyun 			return -EINVAL;
2121*4882a593Smuzhiyun 		}
2122*4882a593Smuzhiyun 	}
2123*4882a593Smuzhiyun 	/* To be nice, we write the original SCRATCH value back: */
2124*4882a593Smuzhiyun 	net2272_write(dev, SCRATCH, refval);
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	/* Verify NET2272 CHIPREV register is read-only: */
2127*4882a593Smuzhiyun 	refval = net2272_read(dev, CHIPREV_2272);
2128*4882a593Smuzhiyun 	for (ii = 0; ii < 0x100; ii += 7) {
2129*4882a593Smuzhiyun 		net2272_write(dev, CHIPREV_2272, ii);
2130*4882a593Smuzhiyun 		val = net2272_read(dev, CHIPREV_2272);
2131*4882a593Smuzhiyun 		if (val != refval) {
2132*4882a593Smuzhiyun 			dev_dbg(dev->dev,
2133*4882a593Smuzhiyun 				"%s: write/read CHIPREV register test failed: "
2134*4882a593Smuzhiyun 				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2135*4882a593Smuzhiyun 				__func__, ii, val, refval);
2136*4882a593Smuzhiyun 			return -EINVAL;
2137*4882a593Smuzhiyun 		}
2138*4882a593Smuzhiyun 	}
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun 	/*
2141*4882a593Smuzhiyun 	 * Verify NET2272's "NET2270 legacy revision" register
2142*4882a593Smuzhiyun 	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2143*4882a593Smuzhiyun 	 *    register should read the same value, regardless of the NET2272
2144*4882a593Smuzhiyun 	 *    silicon revision.  The legacy register applies to NET2270
2145*4882a593Smuzhiyun 	 *    firmware being applied to the NET2272.
2146*4882a593Smuzhiyun 	 */
2147*4882a593Smuzhiyun 	val = net2272_read(dev, CHIPREV_LEGACY);
2148*4882a593Smuzhiyun 	if (val != NET2270_LEGACY_REV) {
2149*4882a593Smuzhiyun 		/*
2150*4882a593Smuzhiyun 		 * Unexpected legacy revision value
2151*4882a593Smuzhiyun 		 * - Perhaps the chip is a NET2270?
2152*4882a593Smuzhiyun 		 */
2153*4882a593Smuzhiyun 		dev_dbg(dev->dev,
2154*4882a593Smuzhiyun 			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2155*4882a593Smuzhiyun 			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2156*4882a593Smuzhiyun 			__func__, NET2270_LEGACY_REV, val);
2157*4882a593Smuzhiyun 		return -EINVAL;
2158*4882a593Smuzhiyun 	}
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 	/*
2161*4882a593Smuzhiyun 	 * Verify NET2272 silicon revision
2162*4882a593Smuzhiyun 	 *  - This revision register is appropriate for the silicon version
2163*4882a593Smuzhiyun 	 *    of the NET2272
2164*4882a593Smuzhiyun 	 */
2165*4882a593Smuzhiyun 	val = net2272_read(dev, CHIPREV_2272);
2166*4882a593Smuzhiyun 	switch (val) {
2167*4882a593Smuzhiyun 	case CHIPREV_NET2272_R1:
2168*4882a593Smuzhiyun 		/*
2169*4882a593Smuzhiyun 		 * NET2272 Rev 1 has DMA related errata:
2170*4882a593Smuzhiyun 		 *  - Newer silicon (Rev 1A or better) required
2171*4882a593Smuzhiyun 		 */
2172*4882a593Smuzhiyun 		dev_dbg(dev->dev,
2173*4882a593Smuzhiyun 			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2174*4882a593Smuzhiyun 			__func__);
2175*4882a593Smuzhiyun 		break;
2176*4882a593Smuzhiyun 	case CHIPREV_NET2272_R1A:
2177*4882a593Smuzhiyun 		break;
2178*4882a593Smuzhiyun 	default:
2179*4882a593Smuzhiyun 		/* NET2272 silicon version *may* not work with this firmware */
2180*4882a593Smuzhiyun 		dev_dbg(dev->dev,
2181*4882a593Smuzhiyun 			"%s: unexpected silicon revision register value: "
2182*4882a593Smuzhiyun 			" CHIPREV_2272: 0x%2.2x\n",
2183*4882a593Smuzhiyun 			__func__, val);
2184*4882a593Smuzhiyun 		/*
2185*4882a593Smuzhiyun 		 * Return Success, even though the chip rev is not an expected value
2186*4882a593Smuzhiyun 		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2187*4882a593Smuzhiyun 		 *  - Often, new silicon is perfectly compatible
2188*4882a593Smuzhiyun 		 */
2189*4882a593Smuzhiyun 	}
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 	/* Success: NET2272 checks out OK */
2192*4882a593Smuzhiyun 	return 0;
2193*4882a593Smuzhiyun }
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun static void
2196*4882a593Smuzhiyun net2272_gadget_release(struct device *_dev)
2197*4882a593Smuzhiyun {
2198*4882a593Smuzhiyun 	struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev);
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun 	kfree(dev);
2201*4882a593Smuzhiyun }
2202*4882a593Smuzhiyun 
2203*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun static void
2206*4882a593Smuzhiyun net2272_remove(struct net2272 *dev)
2207*4882a593Smuzhiyun {
2208*4882a593Smuzhiyun 	if (dev->added)
2209*4882a593Smuzhiyun 		usb_del_gadget(&dev->gadget);
2210*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
2211*4882a593Smuzhiyun 	iounmap(dev->base_addr);
2212*4882a593Smuzhiyun 	device_remove_file(dev->dev, &dev_attr_registers);
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	dev_info(dev->dev, "unbind\n");
2215*4882a593Smuzhiyun }
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2218*4882a593Smuzhiyun {
2219*4882a593Smuzhiyun 	struct net2272 *ret;
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun 	if (!irq) {
2222*4882a593Smuzhiyun 		dev_dbg(dev, "No IRQ!\n");
2223*4882a593Smuzhiyun 		return ERR_PTR(-ENODEV);
2224*4882a593Smuzhiyun 	}
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 	/* alloc, and start init */
2227*4882a593Smuzhiyun 	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2228*4882a593Smuzhiyun 	if (!ret)
2229*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun 	spin_lock_init(&ret->lock);
2232*4882a593Smuzhiyun 	ret->irq = irq;
2233*4882a593Smuzhiyun 	ret->dev = dev;
2234*4882a593Smuzhiyun 	ret->gadget.ops = &net2272_ops;
2235*4882a593Smuzhiyun 	ret->gadget.max_speed = USB_SPEED_HIGH;
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun 	/* the "gadget" abstracts/virtualizes the controller */
2238*4882a593Smuzhiyun 	ret->gadget.name = driver_name;
2239*4882a593Smuzhiyun 	usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release);
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	return ret;
2242*4882a593Smuzhiyun }
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun static int
2245*4882a593Smuzhiyun net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2246*4882a593Smuzhiyun {
2247*4882a593Smuzhiyun 	int ret;
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 	/* See if there... */
2250*4882a593Smuzhiyun 	if (net2272_present(dev)) {
2251*4882a593Smuzhiyun 		dev_warn(dev->dev, "2272 not found!\n");
2252*4882a593Smuzhiyun 		ret = -ENODEV;
2253*4882a593Smuzhiyun 		goto err;
2254*4882a593Smuzhiyun 	}
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 	net2272_usb_reset(dev);
2257*4882a593Smuzhiyun 	net2272_usb_reinit(dev);
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun 	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2260*4882a593Smuzhiyun 	if (ret) {
2261*4882a593Smuzhiyun 		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2262*4882a593Smuzhiyun 		goto err;
2263*4882a593Smuzhiyun 	}
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun 	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2266*4882a593Smuzhiyun 
2267*4882a593Smuzhiyun 	/* done */
2268*4882a593Smuzhiyun 	dev_info(dev->dev, "%s\n", driver_desc);
2269*4882a593Smuzhiyun 	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2270*4882a593Smuzhiyun 		dev->irq, dev->base_addr, dev->chiprev,
2271*4882a593Smuzhiyun 		dma_mode_string());
2272*4882a593Smuzhiyun 	dev_info(dev->dev, "version: %s\n", driver_vers);
2273*4882a593Smuzhiyun 
2274*4882a593Smuzhiyun 	ret = device_create_file(dev->dev, &dev_attr_registers);
2275*4882a593Smuzhiyun 	if (ret)
2276*4882a593Smuzhiyun 		goto err_irq;
2277*4882a593Smuzhiyun 
2278*4882a593Smuzhiyun 	ret = usb_add_gadget(&dev->gadget);
2279*4882a593Smuzhiyun 	if (ret)
2280*4882a593Smuzhiyun 		goto err_add_udc;
2281*4882a593Smuzhiyun 	dev->added = 1;
2282*4882a593Smuzhiyun 
2283*4882a593Smuzhiyun 	return 0;
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun err_add_udc:
2286*4882a593Smuzhiyun 	device_remove_file(dev->dev, &dev_attr_registers);
2287*4882a593Smuzhiyun  err_irq:
2288*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
2289*4882a593Smuzhiyun  err:
2290*4882a593Smuzhiyun 	return ret;
2291*4882a593Smuzhiyun }
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun #ifdef CONFIG_USB_PCI
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun /*
2296*4882a593Smuzhiyun  * wrap this driver around the specified device, but
2297*4882a593Smuzhiyun  * don't respond over USB until a gadget driver binds to us
2298*4882a593Smuzhiyun  */
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun static int
2301*4882a593Smuzhiyun net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2302*4882a593Smuzhiyun {
2303*4882a593Smuzhiyun 	unsigned long resource, len, tmp;
2304*4882a593Smuzhiyun 	void __iomem *mem_mapped_addr[4];
2305*4882a593Smuzhiyun 	int ret, i;
2306*4882a593Smuzhiyun 
2307*4882a593Smuzhiyun 	/*
2308*4882a593Smuzhiyun 	 * BAR 0 holds PLX 9054 config registers
2309*4882a593Smuzhiyun 	 * BAR 1 is i/o memory; unused here
2310*4882a593Smuzhiyun 	 * BAR 2 holds EPLD config registers
2311*4882a593Smuzhiyun 	 * BAR 3 holds NET2272 registers
2312*4882a593Smuzhiyun 	 */
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	/* Find and map all address spaces */
2315*4882a593Smuzhiyun 	for (i = 0; i < 4; ++i) {
2316*4882a593Smuzhiyun 		if (i == 1)
2317*4882a593Smuzhiyun 			continue;	/* BAR1 unused */
2318*4882a593Smuzhiyun 
2319*4882a593Smuzhiyun 		resource = pci_resource_start(pdev, i);
2320*4882a593Smuzhiyun 		len = pci_resource_len(pdev, i);
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun 		if (!request_mem_region(resource, len, driver_name)) {
2323*4882a593Smuzhiyun 			dev_dbg(dev->dev, "controller already in use\n");
2324*4882a593Smuzhiyun 			ret = -EBUSY;
2325*4882a593Smuzhiyun 			goto err;
2326*4882a593Smuzhiyun 		}
2327*4882a593Smuzhiyun 
2328*4882a593Smuzhiyun 		mem_mapped_addr[i] = ioremap(resource, len);
2329*4882a593Smuzhiyun 		if (mem_mapped_addr[i] == NULL) {
2330*4882a593Smuzhiyun 			release_mem_region(resource, len);
2331*4882a593Smuzhiyun 			dev_dbg(dev->dev, "can't map memory\n");
2332*4882a593Smuzhiyun 			ret = -EFAULT;
2333*4882a593Smuzhiyun 			goto err;
2334*4882a593Smuzhiyun 		}
2335*4882a593Smuzhiyun 	}
2336*4882a593Smuzhiyun 
2337*4882a593Smuzhiyun 	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2338*4882a593Smuzhiyun 	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2339*4882a593Smuzhiyun 	dev->base_addr = mem_mapped_addr[3];
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun 	/* Set PLX 9054 bus width (16 bits) */
2342*4882a593Smuzhiyun 	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2343*4882a593Smuzhiyun 	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2344*4882a593Smuzhiyun 			dev->rdk1.plx9054_base_addr + LBRD1);
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 	/* Enable PLX 9054 Interrupts */
2347*4882a593Smuzhiyun 	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2348*4882a593Smuzhiyun 			(1 << PCI_INTERRUPT_ENABLE) |
2349*4882a593Smuzhiyun 			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2350*4882a593Smuzhiyun 			dev->rdk1.plx9054_base_addr + INTCSR);
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun 	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2353*4882a593Smuzhiyun 			dev->rdk1.plx9054_base_addr + DMACSR0);
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 	/* configure the EPLD and assert NET2272 reset; reset is released just below */
2356*4882a593Smuzhiyun 	writeb((1 << EPLD_DMA_ENABLE) |
2357*4882a593Smuzhiyun 		(1 << DMA_CTL_DACK) |
2358*4882a593Smuzhiyun 		(1 << DMA_TIMEOUT_ENABLE) |
2359*4882a593Smuzhiyun 		(1 << USER) |
2360*4882a593Smuzhiyun 		(0 << MPX_MODE) |
2361*4882a593Smuzhiyun 		(1 << BUSWIDTH) |
2362*4882a593Smuzhiyun 		(1 << NET2272_RESET),
2363*4882a593Smuzhiyun 		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun 	mb();
2366*4882a593Smuzhiyun 	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2367*4882a593Smuzhiyun 		~(1 << NET2272_RESET),
2368*4882a593Smuzhiyun 		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2369*4882a593Smuzhiyun 	udelay(200);
2370*4882a593Smuzhiyun 
2371*4882a593Smuzhiyun 	return 0;
2372*4882a593Smuzhiyun 
2373*4882a593Smuzhiyun  err:
2374*4882a593Smuzhiyun 	while (--i >= 0) {
2375*4882a593Smuzhiyun 		if (i == 1)
2376*4882a593Smuzhiyun 			continue;	/* BAR1 unused */
2377*4882a593Smuzhiyun 		iounmap(mem_mapped_addr[i]);
2378*4882a593Smuzhiyun 		release_mem_region(pci_resource_start(pdev, i),
2379*4882a593Smuzhiyun 			pci_resource_len(pdev, i));
2380*4882a593Smuzhiyun 	}
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun 	return ret;
2383*4882a593Smuzhiyun }
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun static int
2386*4882a593Smuzhiyun net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2387*4882a593Smuzhiyun {
2388*4882a593Smuzhiyun 	unsigned long resource, len;
2389*4882a593Smuzhiyun 	void __iomem *mem_mapped_addr[2];
2390*4882a593Smuzhiyun 	int ret, i;
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 	/*
2393*4882a593Smuzhiyun 	 * BAR 0 holds FPGA config registers
2394*4882a593Smuzhiyun 	 * BAR 1 holds NET2272 registers
2395*4882a593Smuzhiyun 	 */
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun 	/* Find and map all address spaces; BARs 2-3 are unused on the RDK2 */
2398*4882a593Smuzhiyun 	for (i = 0; i < 2; ++i) {
2399*4882a593Smuzhiyun 		resource = pci_resource_start(pdev, i);
2400*4882a593Smuzhiyun 		len = pci_resource_len(pdev, i);
2401*4882a593Smuzhiyun 
2402*4882a593Smuzhiyun 		if (!request_mem_region(resource, len, driver_name)) {
2403*4882a593Smuzhiyun 			dev_dbg(dev->dev, "controller already in use\n");
2404*4882a593Smuzhiyun 			ret = -EBUSY;
2405*4882a593Smuzhiyun 			goto err;
2406*4882a593Smuzhiyun 		}
2407*4882a593Smuzhiyun 
2408*4882a593Smuzhiyun 		mem_mapped_addr[i] = ioremap(resource, len);
2409*4882a593Smuzhiyun 		if (mem_mapped_addr[i] == NULL) {
2410*4882a593Smuzhiyun 			release_mem_region(resource, len);
2411*4882a593Smuzhiyun 			dev_dbg(dev->dev, "can't map memory\n");
2412*4882a593Smuzhiyun 			ret = -EFAULT;
2413*4882a593Smuzhiyun 			goto err;
2414*4882a593Smuzhiyun 		}
2415*4882a593Smuzhiyun 	}
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun 	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2418*4882a593Smuzhiyun 	dev->base_addr = mem_mapped_addr[1];
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun 	mb();
2421*4882a593Smuzhiyun 	/* Set 2272 bus width (16 bits) and reset */
2422*4882a593Smuzhiyun 	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2423*4882a593Smuzhiyun 	udelay(200);
2424*4882a593Smuzhiyun 	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2425*4882a593Smuzhiyun 	/* Print fpga version number */
2426*4882a593Smuzhiyun 	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2427*4882a593Smuzhiyun 		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2428*4882a593Smuzhiyun 	/* Enable FPGA Interrupts */
2429*4882a593Smuzhiyun 	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 	return 0;
2432*4882a593Smuzhiyun 
2433*4882a593Smuzhiyun  err:
2434*4882a593Smuzhiyun 	while (--i >= 0) {
2435*4882a593Smuzhiyun 		iounmap(mem_mapped_addr[i]);
2436*4882a593Smuzhiyun 		release_mem_region(pci_resource_start(pdev, i),
2437*4882a593Smuzhiyun 			pci_resource_len(pdev, i));
2438*4882a593Smuzhiyun 	}
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 	return ret;
2441*4882a593Smuzhiyun }
2442*4882a593Smuzhiyun 
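/*
 * PCI probe: enable the device and bus mastering, run the RDK-specific
 * BAR setup above, then finish through the common net2272_probe_fin().
 */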
2443*4882a593Smuzhiyun static int
2444*4882a593Smuzhiyun net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2445*4882a593Smuzhiyun {
2446*4882a593Smuzhiyun 	struct net2272 *dev;
2447*4882a593Smuzhiyun 	int ret;
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun 	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2450*4882a593Smuzhiyun 	if (IS_ERR(dev))
2451*4882a593Smuzhiyun 		return PTR_ERR(dev);
2452*4882a593Smuzhiyun 	dev->dev_id = pdev->device;
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun 	if (pci_enable_device(pdev) < 0) {
2455*4882a593Smuzhiyun 		ret = -ENODEV;
2456*4882a593Smuzhiyun 		goto err_put;
2457*4882a593Smuzhiyun 	}
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun 	pci_set_master(pdev);
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun 	switch (pdev->device) {
2462*4882a593Smuzhiyun 	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2463*4882a593Smuzhiyun 	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2464*4882a593Smuzhiyun 	default: BUG();
2465*4882a593Smuzhiyun 	}
2466*4882a593Smuzhiyun 	if (ret)
2467*4882a593Smuzhiyun 		goto err_pci;
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun 	ret = net2272_probe_fin(dev, 0);
2470*4882a593Smuzhiyun 	if (ret)
2471*4882a593Smuzhiyun 		goto err_pci;
2472*4882a593Smuzhiyun 
2473*4882a593Smuzhiyun 	pci_set_drvdata(pdev, dev);
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 	return 0;
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun  err_pci:
2478*4882a593Smuzhiyun 	pci_disable_device(pdev);
2479*4882a593Smuzhiyun  err_put:
2480*4882a593Smuzhiyun 	usb_put_gadget(&dev->gadget);
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 	return ret;
2483*4882a593Smuzhiyun }
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun static void
2486*4882a593Smuzhiyun net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2487*4882a593Smuzhiyun {
2488*4882a593Smuzhiyun 	int i;
2489*4882a593Smuzhiyun 
2490*4882a593Smuzhiyun 	/* disable PLX 9054 interrupts */
2491*4882a593Smuzhiyun 	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2492*4882a593Smuzhiyun 		~(1 << PCI_INTERRUPT_ENABLE),
2493*4882a593Smuzhiyun 		dev->rdk1.plx9054_base_addr + INTCSR);
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun 	/* clean up resources allocated during probe() */
2496*4882a593Smuzhiyun 	iounmap(dev->rdk1.plx9054_base_addr);
2497*4882a593Smuzhiyun 	iounmap(dev->rdk1.epld_base_addr);
2498*4882a593Smuzhiyun 
2499*4882a593Smuzhiyun 	for (i = 0; i < 4; ++i) {
2500*4882a593Smuzhiyun 		if (i == 1)
2501*4882a593Smuzhiyun 			continue;	/* BAR1 unused */
2502*4882a593Smuzhiyun 		release_mem_region(pci_resource_start(pdev, i),
2503*4882a593Smuzhiyun 			pci_resource_len(pdev, i));
2504*4882a593Smuzhiyun 	}
2505*4882a593Smuzhiyun }
2506*4882a593Smuzhiyun 
2507*4882a593Smuzhiyun static void
2508*4882a593Smuzhiyun net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2509*4882a593Smuzhiyun {
2510*4882a593Smuzhiyun 	int i;
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun 	/* disable fpga interrupts
2513*4882a593Smuzhiyun 	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2514*4882a593Smuzhiyun 			~(1 << PCI_INTERRUPT_ENABLE),
2515*4882a593Smuzhiyun 			dev->rdk1.plx9054_base_addr + INTCSR);
2516*4882a593Smuzhiyun 	*/
2517*4882a593Smuzhiyun 
2518*4882a593Smuzhiyun 	/* clean up resources allocated during probe() */
2519*4882a593Smuzhiyun 	iounmap(dev->rdk2.fpga_base_addr);
2520*4882a593Smuzhiyun 
2521*4882a593Smuzhiyun 	for (i = 0; i < 2; ++i)
2522*4882a593Smuzhiyun 		release_mem_region(pci_resource_start(pdev, i),
2523*4882a593Smuzhiyun 			pci_resource_len(pdev, i));
2524*4882a593Smuzhiyun }
2525*4882a593Smuzhiyun 
2526*4882a593Smuzhiyun static void
2527*4882a593Smuzhiyun net2272_pci_remove(struct pci_dev *pdev)
2528*4882a593Smuzhiyun {
2529*4882a593Smuzhiyun 	struct net2272 *dev = pci_get_drvdata(pdev);
2530*4882a593Smuzhiyun 
2531*4882a593Smuzhiyun 	net2272_remove(dev);
2532*4882a593Smuzhiyun 
2533*4882a593Smuzhiyun 	switch (pdev->device) {
2534*4882a593Smuzhiyun 	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2535*4882a593Smuzhiyun 	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2536*4882a593Smuzhiyun 	default: BUG();
2537*4882a593Smuzhiyun 	}
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun 	pci_disable_device(pdev);
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun 	usb_put_gadget(&dev->gadget);
2542*4882a593Smuzhiyun }
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun /* Table of matching PCI IDs */
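/* class_mask is zero, so matching is effectively by vendor/device ID only */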
2545*4882a593Smuzhiyun static struct pci_device_id pci_ids[] = {
2546*4882a593Smuzhiyun 	{	/* RDK 1 card */
2547*4882a593Smuzhiyun 		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2548*4882a593Smuzhiyun 		.class_mask  = 0,
2549*4882a593Smuzhiyun 		.vendor      = PCI_VENDOR_ID_PLX,
2550*4882a593Smuzhiyun 		.device      = PCI_DEVICE_ID_RDK1,
2551*4882a593Smuzhiyun 		.subvendor   = PCI_ANY_ID,
2552*4882a593Smuzhiyun 		.subdevice   = PCI_ANY_ID,
2553*4882a593Smuzhiyun 	},
2554*4882a593Smuzhiyun 	{	/* RDK 2 card */
2555*4882a593Smuzhiyun 		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2556*4882a593Smuzhiyun 		.class_mask  = 0,
2557*4882a593Smuzhiyun 		.vendor      = PCI_VENDOR_ID_PLX,
2558*4882a593Smuzhiyun 		.device      = PCI_DEVICE_ID_RDK2,
2559*4882a593Smuzhiyun 		.subvendor   = PCI_ANY_ID,
2560*4882a593Smuzhiyun 		.subdevice   = PCI_ANY_ID,
2561*4882a593Smuzhiyun 	},
2562*4882a593Smuzhiyun 	{ }
2563*4882a593Smuzhiyun };
2564*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, pci_ids);
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun static struct pci_driver net2272_pci_driver = {
2567*4882a593Smuzhiyun 	.name     = driver_name,
2568*4882a593Smuzhiyun 	.id_table = pci_ids,
2569*4882a593Smuzhiyun 
2570*4882a593Smuzhiyun 	.probe    = net2272_pci_probe,
2571*4882a593Smuzhiyun 	.remove   = net2272_pci_remove,
2572*4882a593Smuzhiyun };
2573*4882a593Smuzhiyun 
2574*4882a593Smuzhiyun static int net2272_pci_register(void)
2575*4882a593Smuzhiyun {
2576*4882a593Smuzhiyun 	return pci_register_driver(&net2272_pci_driver);
2577*4882a593Smuzhiyun }
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun static void net2272_pci_unregister(void)
2580*4882a593Smuzhiyun {
2581*4882a593Smuzhiyun 	pci_unregister_driver(&net2272_pci_driver);
2582*4882a593Smuzhiyun }
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun #else
2585*4882a593Smuzhiyun static inline int net2272_pci_register(void) { return 0; }
2586*4882a593Smuzhiyun static inline void net2272_pci_unregister(void) { }
2587*4882a593Smuzhiyun #endif
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
2590*4882a593Smuzhiyun 
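/*
 * The platform probe below expects the board code to describe the
 * controller with a memory resource for the register window, an IRQ
 * resource (optionally carrying edge/level polarity flags), and an
 * optional IORESOURCE_BUS entry whose start value becomes the register
 * address shift.  A minimal sketch of such a declaration follows; the
 * base address, size, IRQ number, and shift are placeholders, not taken
 * from any real board:
 *
 *	static struct resource board_net2272_resources[] = {
 *		DEFINE_RES_MEM(0x20300000, 0x100),
 *		DEFINE_RES_IRQ(42),
 *		{ .start = 1, .flags = IORESOURCE_BUS },
 *	};
 *
 *	static struct platform_device board_net2272_device = {
 *		.name          = "net2272",
 *		.id            = -1,
 *		.num_resources = ARRAY_SIZE(board_net2272_resources),
 *		.resource      = board_net2272_resources,
 *	};
 */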
2591*4882a593Smuzhiyun static int
2592*4882a593Smuzhiyun net2272_plat_probe(struct platform_device *pdev)
2593*4882a593Smuzhiyun {
2594*4882a593Smuzhiyun 	struct net2272 *dev;
2595*4882a593Smuzhiyun 	int ret;
2596*4882a593Smuzhiyun 	unsigned int irqflags;
2597*4882a593Smuzhiyun 	resource_size_t base, len;
2598*4882a593Smuzhiyun 	struct resource *iomem, *iomem_bus, *irq_res;
2599*4882a593Smuzhiyun 
2600*4882a593Smuzhiyun 	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2601*4882a593Smuzhiyun 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2602*4882a593Smuzhiyun 	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2603*4882a593Smuzhiyun 	if (!irq_res || !iomem) {
2604*4882a593Smuzhiyun 		dev_err(&pdev->dev, "must provide irq/base addr\n");
2605*4882a593Smuzhiyun 		return -EINVAL;
2606*4882a593Smuzhiyun 	}
2607*4882a593Smuzhiyun 
2608*4882a593Smuzhiyun 	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2609*4882a593Smuzhiyun 	if (IS_ERR(dev))
2610*4882a593Smuzhiyun 		return PTR_ERR(dev);
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun 	irqflags = 0;
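	/*
	 * Translate the IRQ resource's edge/level polarity flags into the
	 * corresponding IRQF_TRIGGER_* flags.
	 */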
2613*4882a593Smuzhiyun 	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2614*4882a593Smuzhiyun 		irqflags |= IRQF_TRIGGER_RISING;
2615*4882a593Smuzhiyun 	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2616*4882a593Smuzhiyun 		irqflags |= IRQF_TRIGGER_FALLING;
2617*4882a593Smuzhiyun 	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2618*4882a593Smuzhiyun 		irqflags |= IRQF_TRIGGER_HIGH;
2619*4882a593Smuzhiyun 	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2620*4882a593Smuzhiyun 		irqflags |= IRQF_TRIGGER_LOW;
2621*4882a593Smuzhiyun 
2622*4882a593Smuzhiyun 	base = iomem->start;
2623*4882a593Smuzhiyun 	len = resource_size(iomem);
2624*4882a593Smuzhiyun 	if (iomem_bus)
2625*4882a593Smuzhiyun 		dev->base_shift = iomem_bus->start;
2626*4882a593Smuzhiyun 
2627*4882a593Smuzhiyun 	if (!request_mem_region(base, len, driver_name)) {
2628*4882a593Smuzhiyun 		dev_dbg(dev->dev, "can't request memory region\n");
2629*4882a593Smuzhiyun 		ret = -EBUSY;
2630*4882a593Smuzhiyun 		goto err;
2631*4882a593Smuzhiyun 	}
2632*4882a593Smuzhiyun 	dev->base_addr = ioremap(base, len);
2633*4882a593Smuzhiyun 	if (!dev->base_addr) {
2634*4882a593Smuzhiyun 		dev_dbg(dev->dev, "can't map memory\n");
2635*4882a593Smuzhiyun 		ret = -EFAULT;
2636*4882a593Smuzhiyun 		goto err_req;
2637*4882a593Smuzhiyun 	}
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun 	ret = net2272_probe_fin(dev, irqflags);
2640*4882a593Smuzhiyun 	if (ret)
2641*4882a593Smuzhiyun 		goto err_io;
2642*4882a593Smuzhiyun 
2643*4882a593Smuzhiyun 	platform_set_drvdata(pdev, dev);
2644*4882a593Smuzhiyun 	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2645*4882a593Smuzhiyun 		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun 	return 0;
2648*4882a593Smuzhiyun 
2649*4882a593Smuzhiyun  err_io:
2650*4882a593Smuzhiyun 	iounmap(dev->base_addr);
2651*4882a593Smuzhiyun  err_req:
2652*4882a593Smuzhiyun 	release_mem_region(base, len);
2653*4882a593Smuzhiyun  err:
2654*4882a593Smuzhiyun 	usb_put_gadget(&dev->gadget);
2655*4882a593Smuzhiyun 
2656*4882a593Smuzhiyun 	return ret;
2657*4882a593Smuzhiyun }
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun static int
2660*4882a593Smuzhiyun net2272_plat_remove(struct platform_device *pdev)
2661*4882a593Smuzhiyun {
2662*4882a593Smuzhiyun 	struct net2272 *dev = platform_get_drvdata(pdev);
2663*4882a593Smuzhiyun 
2664*4882a593Smuzhiyun 	net2272_remove(dev);
2665*4882a593Smuzhiyun 
2666*4882a593Smuzhiyun 	release_mem_region(pdev->resource[0].start,
2667*4882a593Smuzhiyun 		resource_size(&pdev->resource[0]));
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun 	usb_put_gadget(&dev->gadget);
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	return 0;
2672*4882a593Smuzhiyun }
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun static struct platform_driver net2272_plat_driver = {
2675*4882a593Smuzhiyun 	.probe   = net2272_plat_probe,
2676*4882a593Smuzhiyun 	.remove  = net2272_plat_remove,
2677*4882a593Smuzhiyun 	.driver  = {
2678*4882a593Smuzhiyun 		.name  = driver_name,
2679*4882a593Smuzhiyun 	},
2680*4882a593Smuzhiyun 	/* FIXME .suspend, .resume */
2681*4882a593Smuzhiyun };
2682*4882a593Smuzhiyun MODULE_ALIAS("platform:net2272");
2683*4882a593Smuzhiyun 
2684*4882a593Smuzhiyun static int __init net2272_init(void)
2685*4882a593Smuzhiyun {
2686*4882a593Smuzhiyun 	int ret;
2687*4882a593Smuzhiyun 
2688*4882a593Smuzhiyun 	ret = net2272_pci_register();
2689*4882a593Smuzhiyun 	if (ret)
2690*4882a593Smuzhiyun 		return ret;
2691*4882a593Smuzhiyun 	ret = platform_driver_register(&net2272_plat_driver);
2692*4882a593Smuzhiyun 	if (ret)
2693*4882a593Smuzhiyun 		goto err_pci;
2694*4882a593Smuzhiyun 	return ret;
2695*4882a593Smuzhiyun 
2696*4882a593Smuzhiyun err_pci:
2697*4882a593Smuzhiyun 	net2272_pci_unregister();
2698*4882a593Smuzhiyun 	return ret;
2699*4882a593Smuzhiyun }
2700*4882a593Smuzhiyun module_init(net2272_init);
2701*4882a593Smuzhiyun 
2702*4882a593Smuzhiyun static void __exit net2272_cleanup(void)
2703*4882a593Smuzhiyun {
2704*4882a593Smuzhiyun 	net2272_pci_unregister();
2705*4882a593Smuzhiyun 	platform_driver_unregister(&net2272_plat_driver);
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun module_exit(net2272_cleanup);
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun MODULE_DESCRIPTION(DRIVER_DESC);
2710*4882a593Smuzhiyun MODULE_AUTHOR("PLX Technology, Inc.");
2711*4882a593Smuzhiyun MODULE_LICENSE("GPL");