xref: /OK3568_Linux_fs/kernel/drivers/net/usb/usbnet.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * USB Network driver infrastructure
4*4882a593Smuzhiyun  * Copyright (C) 2000-2005 by David Brownell
5*4882a593Smuzhiyun  * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun /*
9*4882a593Smuzhiyun  * This is a generic "USB networking" framework that works with several
10*4882a593Smuzhiyun  * kinds of full and high speed networking devices:  host-to-host cables,
11*4882a593Smuzhiyun  * smart usb peripherals, and actual Ethernet adapters.
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * These devices usually differ in terms of control protocols (if they
14*4882a593Smuzhiyun  * even have one!) and sometimes they define new framing to wrap or batch
15*4882a593Smuzhiyun  * Ethernet packets.  Otherwise, they talk to USB pretty much the same,
16*4882a593Smuzhiyun  * so interface (un)binding, endpoint I/O queues, fault handling, and other
17*4882a593Smuzhiyun  * issues can usefully be addressed by this framework.
18*4882a593Smuzhiyun  */
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun // #define	DEBUG			// error path messages, extra info
21*4882a593Smuzhiyun // #define	VERBOSE			// more; success messages
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #include <linux/module.h>
24*4882a593Smuzhiyun #include <linux/init.h>
25*4882a593Smuzhiyun #include <linux/netdevice.h>
26*4882a593Smuzhiyun #include <linux/etherdevice.h>
27*4882a593Smuzhiyun #include <linux/ctype.h>
28*4882a593Smuzhiyun #include <linux/ethtool.h>
29*4882a593Smuzhiyun #include <linux/workqueue.h>
30*4882a593Smuzhiyun #include <linux/mii.h>
31*4882a593Smuzhiyun #include <linux/usb.h>
32*4882a593Smuzhiyun #include <linux/usb/usbnet.h>
33*4882a593Smuzhiyun #include <linux/slab.h>
34*4882a593Smuzhiyun #include <linux/kernel.h>
35*4882a593Smuzhiyun #include <linux/pm_runtime.h>
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun /*
40*4882a593Smuzhiyun  * Nineteen USB 1.1 max size bulk transactions per frame (ms), max.
41*4882a593Smuzhiyun  * Several dozen bytes of IPv4 data can fit in two such transactions.
42*4882a593Smuzhiyun  * One maximum size Ethernet packet takes twenty four of them.
43*4882a593Smuzhiyun  * For high speed, each frame comfortably fits almost 36 max size
44*4882a593Smuzhiyun  * Ethernet packets (so queues should be bigger).
45*4882a593Smuzhiyun  *
46*4882a593Smuzhiyun  * The goal is to let the USB host controller be busy for 5msec or
47*4882a593Smuzhiyun  * more before an irq is required, under load.  Jumbograms change
48*4882a593Smuzhiyun  * the equation.
49*4882a593Smuzhiyun  */
50*4882a593Smuzhiyun #define	MAX_QUEUE_MEMORY	(60 * 1518)
51*4882a593Smuzhiyun #define	RX_QLEN(dev)		((dev)->rx_qlen)
52*4882a593Smuzhiyun #define	TX_QLEN(dev)		((dev)->tx_qlen)
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun // reawaken network queue this soon after stopping; else watchdog barks
55*4882a593Smuzhiyun #define TX_TIMEOUT_JIFFIES	(5*HZ)
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun /* throttle rx/tx briefly after some faults, so hub_wq might disconnect()
58*4882a593Smuzhiyun  * us (it polls at HZ/4 usually) before we report too many false errors.
59*4882a593Smuzhiyun  */
60*4882a593Smuzhiyun #define THROTTLE_JIFFIES	(HZ/8)
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun // between wakeups
63*4882a593Smuzhiyun #define UNLINK_TIMEOUT_MS	3
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun // randomly generated ethernet address
68*4882a593Smuzhiyun static u8	node_id [ETH_ALEN];
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun /* use ethtool to change the level for any given device */
71*4882a593Smuzhiyun static int msg_level = -1;
72*4882a593Smuzhiyun module_param (msg_level, int, 0);
73*4882a593Smuzhiyun MODULE_PARM_DESC (msg_level, "Override default message level");
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun /* handles CDC Ethernet and many other network "bulk data" interfaces */
int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
{
	int				tmp;
	struct usb_host_interface	*alt = NULL;
	struct usb_host_endpoint	*in = NULL, *out = NULL;
	struct usb_host_endpoint	*status = NULL;

	/* Walk every altsetting until one offers both a bulk-IN and a
	 * bulk-OUT endpoint; an interrupt-IN endpoint found along the
	 * way is remembered as the optional status endpoint.
	 */
	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned	ep;

		in = out = status = NULL;
		alt = intf->altsetting + tmp;

		/* take the first altsetting with in-bulk + out-bulk;
		 * remember any status endpoint, just in case;
		 * ignore other endpoints and altsettings.
		 */
		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint	*e;
			int				intr = 0;

			e = alt->endpoint + ep;

			/* ignore endpoints which cannot transfer data */
			if (!usb_endpoint_maxp(&e->desc))
				continue;

			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only IN interrupt endpoints can carry status */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				fallthrough;
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				/* iso and control endpoints are of no use here */
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				/* first bulk-IN wins; first intr-IN becomes status */
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				/* first OUT endpoint (bulk) wins */
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	/* no altsetting has the required endpoint pair */
	if (!alt || !in || !out)
		return -EINVAL;

	/* select the altsetting, unless it is the default one and the
	 * minidriver asked us (via FLAG_NO_SETINT) not to issue SET_INTERFACE
	 */
	if (alt->desc.bAlternateSetting != 0 ||
	    !(dev->driver_info->flags & FLAG_NO_SETINT)) {
		tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
				alt->desc.bAlternateSetting);
		if (tmp < 0)
			return tmp;
	}

	/* cache the data pipes (and the optional status endpoint) */
	dev->in = usb_rcvbulkpipe (dev->udev,
			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->out = usb_sndbulkpipe (dev->udev,
			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
	dev->status = status;
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
148*4882a593Smuzhiyun 
usbnet_get_ethernet_addr(struct usbnet * dev,int iMACAddress)149*4882a593Smuzhiyun int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	int 		tmp = -1, ret;
152*4882a593Smuzhiyun 	unsigned char	buf [13];
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
155*4882a593Smuzhiyun 	if (ret == 12)
156*4882a593Smuzhiyun 		tmp = hex2bin(dev->net->dev_addr, buf, 6);
157*4882a593Smuzhiyun 	if (tmp < 0) {
158*4882a593Smuzhiyun 		dev_dbg(&dev->udev->dev,
159*4882a593Smuzhiyun 			"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
160*4882a593Smuzhiyun 		if (ret >= 0)
161*4882a593Smuzhiyun 			ret = -EINVAL;
162*4882a593Smuzhiyun 		return ret;
163*4882a593Smuzhiyun 	}
164*4882a593Smuzhiyun 	return 0;
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
167*4882a593Smuzhiyun 
intr_complete(struct urb * urb)168*4882a593Smuzhiyun static void intr_complete (struct urb *urb)
169*4882a593Smuzhiyun {
170*4882a593Smuzhiyun 	struct usbnet	*dev = urb->context;
171*4882a593Smuzhiyun 	int		status = urb->status;
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	switch (status) {
174*4882a593Smuzhiyun 	/* success */
175*4882a593Smuzhiyun 	case 0:
176*4882a593Smuzhiyun 		dev->driver_info->status(dev, urb);
177*4882a593Smuzhiyun 		break;
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	/* software-driven interface shutdown */
180*4882a593Smuzhiyun 	case -ENOENT:		/* urb killed */
181*4882a593Smuzhiyun 	case -ESHUTDOWN:	/* hardware gone */
182*4882a593Smuzhiyun 		netif_dbg(dev, ifdown, dev->net,
183*4882a593Smuzhiyun 			  "intr shutdown, code %d\n", status);
184*4882a593Smuzhiyun 		return;
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	/* NOTE:  not throttling like RX/TX, since this endpoint
187*4882a593Smuzhiyun 	 * already polls infrequently
188*4882a593Smuzhiyun 	 */
189*4882a593Smuzhiyun 	default:
190*4882a593Smuzhiyun 		netdev_dbg(dev->net, "intr status %d\n", status);
191*4882a593Smuzhiyun 		break;
192*4882a593Smuzhiyun 	}
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	status = usb_submit_urb (urb, GFP_ATOMIC);
195*4882a593Smuzhiyun 	if (status != 0)
196*4882a593Smuzhiyun 		netif_err(dev, timer, dev->net,
197*4882a593Smuzhiyun 			  "intr resubmit --> %d\n", status);
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun 
init_status(struct usbnet * dev,struct usb_interface * intf)200*4882a593Smuzhiyun static int init_status (struct usbnet *dev, struct usb_interface *intf)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun 	char		*buf = NULL;
203*4882a593Smuzhiyun 	unsigned	pipe = 0;
204*4882a593Smuzhiyun 	unsigned	maxp;
205*4882a593Smuzhiyun 	unsigned	period;
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	if (!dev->driver_info->status)
208*4882a593Smuzhiyun 		return 0;
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 	pipe = usb_rcvintpipe (dev->udev,
211*4882a593Smuzhiyun 			dev->status->desc.bEndpointAddress
212*4882a593Smuzhiyun 				& USB_ENDPOINT_NUMBER_MASK);
213*4882a593Smuzhiyun 	maxp = usb_maxpacket (dev->udev, pipe, 0);
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	/* avoid 1 msec chatter:  min 8 msec poll rate */
216*4882a593Smuzhiyun 	period = max ((int) dev->status->desc.bInterval,
217*4882a593Smuzhiyun 		(dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	buf = kmalloc (maxp, GFP_KERNEL);
220*4882a593Smuzhiyun 	if (buf) {
221*4882a593Smuzhiyun 		dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
222*4882a593Smuzhiyun 		if (!dev->interrupt) {
223*4882a593Smuzhiyun 			kfree (buf);
224*4882a593Smuzhiyun 			return -ENOMEM;
225*4882a593Smuzhiyun 		} else {
226*4882a593Smuzhiyun 			usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
227*4882a593Smuzhiyun 				buf, maxp, intr_complete, dev, period);
228*4882a593Smuzhiyun 			dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
229*4882a593Smuzhiyun 			dev_dbg(&intf->dev,
230*4882a593Smuzhiyun 				"status ep%din, %d bytes period %d\n",
231*4882a593Smuzhiyun 				usb_pipeendpoint(pipe), maxp, period);
232*4882a593Smuzhiyun 		}
233*4882a593Smuzhiyun 	}
234*4882a593Smuzhiyun 	return 0;
235*4882a593Smuzhiyun }
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun /* Submit the interrupt URB if not previously submitted, increasing refcount */
usbnet_status_start(struct usbnet * dev,gfp_t mem_flags)238*4882a593Smuzhiyun int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags)
239*4882a593Smuzhiyun {
240*4882a593Smuzhiyun 	int ret = 0;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	WARN_ON_ONCE(dev->interrupt == NULL);
243*4882a593Smuzhiyun 	if (dev->interrupt) {
244*4882a593Smuzhiyun 		mutex_lock(&dev->interrupt_mutex);
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 		if (++dev->interrupt_count == 1)
247*4882a593Smuzhiyun 			ret = usb_submit_urb(dev->interrupt, mem_flags);
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 		dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n",
250*4882a593Smuzhiyun 			dev->interrupt_count);
251*4882a593Smuzhiyun 		mutex_unlock(&dev->interrupt_mutex);
252*4882a593Smuzhiyun 	}
253*4882a593Smuzhiyun 	return ret;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_status_start);
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun /* For resume; submit interrupt URB if previously submitted */
__usbnet_status_start_force(struct usbnet * dev,gfp_t mem_flags)258*4882a593Smuzhiyun static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags)
259*4882a593Smuzhiyun {
260*4882a593Smuzhiyun 	int ret = 0;
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun 	mutex_lock(&dev->interrupt_mutex);
263*4882a593Smuzhiyun 	if (dev->interrupt_count) {
264*4882a593Smuzhiyun 		ret = usb_submit_urb(dev->interrupt, mem_flags);
265*4882a593Smuzhiyun 		dev_dbg(&dev->udev->dev,
266*4882a593Smuzhiyun 			"submitted interrupt URB for resume\n");
267*4882a593Smuzhiyun 	}
268*4882a593Smuzhiyun 	mutex_unlock(&dev->interrupt_mutex);
269*4882a593Smuzhiyun 	return ret;
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun /* Kill the interrupt URB if all submitters want it killed */
usbnet_status_stop(struct usbnet * dev)273*4882a593Smuzhiyun void usbnet_status_stop(struct usbnet *dev)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun 	if (dev->interrupt) {
276*4882a593Smuzhiyun 		mutex_lock(&dev->interrupt_mutex);
277*4882a593Smuzhiyun 		WARN_ON(dev->interrupt_count == 0);
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 		if (dev->interrupt_count && --dev->interrupt_count == 0)
280*4882a593Smuzhiyun 			usb_kill_urb(dev->interrupt);
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 		dev_dbg(&dev->udev->dev,
283*4882a593Smuzhiyun 			"decremented interrupt URB count to %d\n",
284*4882a593Smuzhiyun 			dev->interrupt_count);
285*4882a593Smuzhiyun 		mutex_unlock(&dev->interrupt_mutex);
286*4882a593Smuzhiyun 	}
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_status_stop);
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun /* For suspend; always kill interrupt URB */
__usbnet_status_stop_force(struct usbnet * dev)291*4882a593Smuzhiyun static void __usbnet_status_stop_force(struct usbnet *dev)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun 	if (dev->interrupt) {
294*4882a593Smuzhiyun 		mutex_lock(&dev->interrupt_mutex);
295*4882a593Smuzhiyun 		usb_kill_urb(dev->interrupt);
296*4882a593Smuzhiyun 		dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n");
297*4882a593Smuzhiyun 		mutex_unlock(&dev->interrupt_mutex);
298*4882a593Smuzhiyun 	}
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun 
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
	unsigned long flags;
	int	status;

	/* while rx is paused, park the packet for later replay */
	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	/* only update if unset to allow minidriver rx_fixup override */
	if (skb->protocol == 0)
		skb->protocol = eth_type_trans (skb, dev->net);

	/* per-cpu 64-bit rx counters, guarded by the u64_stats seqcount */
	flags = u64_stats_update_begin_irqsave(&stats64->syncp);
	stats64->rx_packets++;
	stats64->rx_bytes += skb->len;
	u64_stats_update_end_irqrestore(&stats64->syncp, flags);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	/* clear our skb_data bookkeeping; the stack reuses the cb area */
	memset (skb->cb, 0, sizeof (struct skb_data));

	/* if true, the timestamping code has consumed the skb */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun /* must be called if hard_mtu or rx_urb_size changed */
usbnet_update_max_qlen(struct usbnet * dev)340*4882a593Smuzhiyun void usbnet_update_max_qlen(struct usbnet *dev)
341*4882a593Smuzhiyun {
342*4882a593Smuzhiyun 	enum usb_device_speed speed = dev->udev->speed;
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	if (!dev->rx_urb_size || !dev->hard_mtu)
345*4882a593Smuzhiyun 		goto insanity;
346*4882a593Smuzhiyun 	switch (speed) {
347*4882a593Smuzhiyun 	case USB_SPEED_HIGH:
348*4882a593Smuzhiyun 		dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
349*4882a593Smuzhiyun 		dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
350*4882a593Smuzhiyun 		break;
351*4882a593Smuzhiyun 	case USB_SPEED_SUPER:
352*4882a593Smuzhiyun 	case USB_SPEED_SUPER_PLUS:
353*4882a593Smuzhiyun 		/*
354*4882a593Smuzhiyun 		 * Not take default 5ms qlen for super speed HC to
355*4882a593Smuzhiyun 		 * save memory, and iperf tests show 2.5ms qlen can
356*4882a593Smuzhiyun 		 * work well
357*4882a593Smuzhiyun 		 */
358*4882a593Smuzhiyun 		dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
359*4882a593Smuzhiyun 		dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
360*4882a593Smuzhiyun 		break;
361*4882a593Smuzhiyun 	default:
362*4882a593Smuzhiyun insanity:
363*4882a593Smuzhiyun 		dev->rx_qlen = dev->tx_qlen = 4;
364*4882a593Smuzhiyun 	}
365*4882a593Smuzhiyun }
366*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun /*-------------------------------------------------------------------------
370*4882a593Smuzhiyun  *
371*4882a593Smuzhiyun  * Network Device Driver (peer link to "Host Device", from USB host)
372*4882a593Smuzhiyun  *
373*4882a593Smuzhiyun  *-------------------------------------------------------------------------*/
374*4882a593Smuzhiyun 
int usbnet_change_mtu (struct net_device *net, int new_mtu)
{
	struct usbnet	*dev = netdev_priv(net);
	int		ll_mtu = new_mtu + net->hard_header_len;
	int		old_hard_mtu = dev->hard_mtu;
	int		old_rx_urb_size = dev->rx_urb_size;

	// no second zero-length packet read wanted after mtu-sized packets
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;
	net->mtu = new_mtu;

	dev->hard_mtu = net->mtu + net->hard_header_len;
	/* only resize rx urbs when they were tracking the old hard_mtu;
	 * minidrivers with their own rx_urb_size are left untouched
	 */
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* recycle in-flight rx urbs so replacements are
			 * allocated with the larger buffer size
			 */
			usbnet_pause_rx(dev);
			usbnet_unlink_rx_urbs(dev);
			usbnet_resume_rx(dev);
		}
	}

	/* max qlen depend on hard_mtu and rx_urb_size */
	usbnet_update_max_qlen(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun /* The caller must hold list->lock */
__usbnet_queue_skb(struct sk_buff_head * list,struct sk_buff * newsk,enum skb_state state)405*4882a593Smuzhiyun static void __usbnet_queue_skb(struct sk_buff_head *list,
406*4882a593Smuzhiyun 			struct sk_buff *newsk, enum skb_state state)
407*4882a593Smuzhiyun {
408*4882a593Smuzhiyun 	struct skb_data *entry = (struct skb_data *) newsk->cb;
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	__skb_queue_tail(list, newsk);
411*4882a593Smuzhiyun 	entry->state = state;
412*4882a593Smuzhiyun }
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
417*4882a593Smuzhiyun  * completion callbacks.  2.5 should have fixed those bugs...
418*4882a593Smuzhiyun  */
419*4882a593Smuzhiyun 
/* Move @skb from @list to dev->done under both queue locks, updating
 * its state; schedules the bottom half when the done list becomes
 * non-empty.  Returns the skb's previous state so callers can detect
 * a concurrent unlink.
 */
static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
		struct sk_buff_head *list, enum skb_state state)
{
	unsigned long		flags;
	enum skb_state 		old_state;
	struct skb_data *entry = (struct skb_data *) skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;
	__skb_unlink(skb, list);

	/* defer_bh() is never called with list == &dev->done.
	 * spin_lock_nested() tells lockdep that it is OK to take
	 * dev->done.lock here with list->lock held.
	 */
	spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);

	__skb_queue_tail(&dev->done, skb);
	/* kick the tasklet only on the empty -> non-empty transition */
	if (dev->done.qlen == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock(&dev->done.lock);
	spin_unlock_irqrestore(&list->lock, flags);
	return old_state;
}
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun /* some work can't be done in tasklets, so we use keventd
447*4882a593Smuzhiyun  *
448*4882a593Smuzhiyun  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
449*4882a593Smuzhiyun  * but tasklet_schedule() doesn't.  hope the failure is rare.
450*4882a593Smuzhiyun  */
usbnet_defer_kevent(struct usbnet * dev,int work)451*4882a593Smuzhiyun void usbnet_defer_kevent (struct usbnet *dev, int work)
452*4882a593Smuzhiyun {
453*4882a593Smuzhiyun 	set_bit (work, &dev->flags);
454*4882a593Smuzhiyun 	if (!schedule_work (&dev->kevent))
455*4882a593Smuzhiyun 		netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
456*4882a593Smuzhiyun 	else
457*4882a593Smuzhiyun 		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
458*4882a593Smuzhiyun }
459*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
460*4882a593Smuzhiyun 
461*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun static void rx_complete (struct urb *urb);
464*4882a593Smuzhiyun 
/* Allocate an rx skb for @urb and submit the pair to the bulk-IN pipe.
 * On any failure both the skb and the urb are freed here; on success
 * ownership passes to the completion handler via dev->rxq.  Returns 0
 * on success or a negative errno.
 */
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	/* prevent rx skb allocation when error ratio is high */
	if (test_bit(EVENT_RX_KILL, &dev->flags)) {
		usb_free_urb(urb);
		return -ENOLINK;
	}

	/* some minidrivers need the buffer unaligned (EVENT_NO_IP_ALIGN) */
	if (test_bit(EVENT_NO_IP_ALIGN, &dev->flags))
		skb = __netdev_alloc_skb(dev->net, size, flags);
	else
		skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		/* let keventd retry allocation later */
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	/* bookkeeping lives in the skb's cb area */
	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	/* only submit while the interface is up, present, open, and
	 * neither halted nor asleep
	 */
	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    test_bit(EVENT_DEV_OPEN, &dev->flags) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			/* stalled pipe: ask keventd to clear the halt */
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			/* let the bottom half retry the fill */
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			/* submitted: track the skb on the rx queue */
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	/* on failure we still own both skb and urb: release them */
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
541*4882a593Smuzhiyun 
rx_process(struct usbnet * dev,struct sk_buff * skb)542*4882a593Smuzhiyun static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
543*4882a593Smuzhiyun {
544*4882a593Smuzhiyun 	if (dev->driver_info->rx_fixup &&
545*4882a593Smuzhiyun 	    !dev->driver_info->rx_fixup (dev, skb)) {
546*4882a593Smuzhiyun 		/* With RX_ASSEMBLE, rx_fixup() must update counters */
547*4882a593Smuzhiyun 		if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
548*4882a593Smuzhiyun 			dev->net->stats.rx_errors++;
549*4882a593Smuzhiyun 		goto done;
550*4882a593Smuzhiyun 	}
551*4882a593Smuzhiyun 	// else network stack removes extra byte if we forced a short packet
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	/* all data was already cloned from skb inside the driver */
554*4882a593Smuzhiyun 	if (dev->driver_info->flags & FLAG_MULTI_PACKET)
555*4882a593Smuzhiyun 		goto done;
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 	if (skb->len < ETH_HLEN) {
558*4882a593Smuzhiyun 		dev->net->stats.rx_errors++;
559*4882a593Smuzhiyun 		dev->net->stats.rx_length_errors++;
560*4882a593Smuzhiyun 		netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
561*4882a593Smuzhiyun 	} else {
562*4882a593Smuzhiyun 		usbnet_skb_return(dev, skb);
563*4882a593Smuzhiyun 		return;
564*4882a593Smuzhiyun 	}
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun done:
567*4882a593Smuzhiyun 	skb_queue_tail(&dev->done, skb);
568*4882a593Smuzhiyun }
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
571*4882a593Smuzhiyun 
/* Completion handler for rx bulk URBs: classify the URB status,
 * account errors, hand the skb to the bottom half via defer_bh(),
 * and resubmit the urb when the interface is still up.
 */
static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;
	enum skb_state		state;

	skb_put (skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		fallthrough;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* we get controller i/o faults during hub_wq disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a hub_wq delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		/* keep the urb with the skb so the cleanup path frees it */
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	/* stop rx if packet error rate is high: more than 20 errors in a
	 * window of 30 packets sets EVENT_RX_KILL
	 */
	if (++dev->pkt_cnt > 30) {
		dev->pkt_cnt = 0;
		dev->pkt_err = 0;
	} else {
		if (state == rx_cleanup)
			dev->pkt_err++;
		if (dev->pkt_err > 20)
			set_bit(EVENT_RX_KILL, &dev->flags);
	}

	/* move the skb to dev->done; old state tells us about races */
	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		/* resubmit unless the interface is down, halted, or the
		 * skb was being unlinked when we completed
		 */
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit (dev, urb, GFP_ATOMIC);
			usb_mark_last_busy(dev->udev);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
662*4882a593Smuzhiyun 
663*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
usbnet_pause_rx(struct usbnet * dev)664*4882a593Smuzhiyun void usbnet_pause_rx(struct usbnet *dev)
665*4882a593Smuzhiyun {
666*4882a593Smuzhiyun 	set_bit(EVENT_RX_PAUSED, &dev->flags);
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 	netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
669*4882a593Smuzhiyun }
670*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_pause_rx);
671*4882a593Smuzhiyun 
usbnet_resume_rx(struct usbnet * dev)672*4882a593Smuzhiyun void usbnet_resume_rx(struct usbnet *dev)
673*4882a593Smuzhiyun {
674*4882a593Smuzhiyun 	struct sk_buff *skb;
675*4882a593Smuzhiyun 	int num = 0;
676*4882a593Smuzhiyun 
677*4882a593Smuzhiyun 	clear_bit(EVENT_RX_PAUSED, &dev->flags);
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 	while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
680*4882a593Smuzhiyun 		usbnet_skb_return(dev, skb);
681*4882a593Smuzhiyun 		num++;
682*4882a593Smuzhiyun 	}
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	tasklet_schedule(&dev->bh);
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	netif_dbg(dev, rx_status, dev->net,
687*4882a593Smuzhiyun 		  "paused rx queue disabled, %d skbs requeued\n", num);
688*4882a593Smuzhiyun }
689*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_resume_rx);
690*4882a593Smuzhiyun 
usbnet_purge_paused_rxq(struct usbnet * dev)691*4882a593Smuzhiyun void usbnet_purge_paused_rxq(struct usbnet *dev)
692*4882a593Smuzhiyun {
693*4882a593Smuzhiyun 	skb_queue_purge(&dev->rxq_pause);
694*4882a593Smuzhiyun }
695*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun // unlink pending rx/tx; completion handlers do all other cleanup
700*4882a593Smuzhiyun 
/* Asynchronously unlink every URB still pending on queue @q (rxq or txq).
 * Returns the number of URBs whose unlink was successfully initiated.
 * The completion handlers perform all remaining cleanup.
 *
 * NOTE(review): the queue lock must be dropped around usb_unlink_urb()
 * because completion handlers (which take the same lock via defer_bh)
 * may run synchronously from it; the skb_queue_walk therefore restarts
 * from the head on every iteration, skipping entries already marked
 * unlink_start.
 */
static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
{
	unsigned long		flags;
	struct sk_buff		*skb;
	int			count = 0;

	spin_lock_irqsave (&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data		*entry;
		struct urb		*urb;
		int			retval;

		/* find the first entry not yet being unlinked; if all are
		 * already in unlink_start, there is nothing left to do.
		 */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *) skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/*
		 * Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		// during some PM-driven resume scenarios,
		// these (async) unlinks complete immediately
		retval = usb_unlink_urb (urb);
		if (retval != -EINPROGRESS && retval != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore (&q->lock, flags);
	return count;
}
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun // Flush all pending rx urbs
747*4882a593Smuzhiyun // minidrivers may need to do this when the MTU changes
748*4882a593Smuzhiyun 
usbnet_unlink_rx_urbs(struct usbnet * dev)749*4882a593Smuzhiyun void usbnet_unlink_rx_urbs(struct usbnet *dev)
750*4882a593Smuzhiyun {
751*4882a593Smuzhiyun 	if (netif_running(dev->net)) {
752*4882a593Smuzhiyun 		(void) unlink_urbs (dev, &dev->rxq);
753*4882a593Smuzhiyun 		tasklet_schedule(&dev->bh);
754*4882a593Smuzhiyun 	}
755*4882a593Smuzhiyun }
756*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
759*4882a593Smuzhiyun 
/* Busy-wait (sleeping in UNINTERRUPTIBLE chunks of UNLINK_TIMEOUT_MS)
 * until queue @q drains.  Caller must already have set the task state
 * to TASK_UNINTERRUPTIBLE for the first schedule_timeout() to sleep;
 * usbnet_terminate_urbs() does so before calling in.
 */
static void wait_skb_queue_empty(struct sk_buff_head *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		spin_unlock_irqrestore(&q->lock, flags);
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		/* re-arm the sleep state before the next iteration; a
		 * completion handler's wakeup set us back to RUNNING */
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
}
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun // precondition: never called in_interrupt
/* Cancel all outstanding tx/rx URBs and block until the txq, rxq and
 * done queues are fully drained.  Must be called in process context
 * (sleeps); registering on dev->wait lets completion paths wake us.
 */
static void usbnet_terminate_urbs(struct usbnet *dev)
{
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&dev->wait, &wait);
	/* set the sleep state BEFORE unlinking so a wakeup racing with
	 * the unlinks below cannot be lost */
	set_current_state(TASK_UNINTERRUPTIBLE);
	temp = unlink_urbs(dev, &dev->txq) +
		unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	wait_skb_queue_empty(&dev->rxq);
	wait_skb_queue_empty(&dev->txq);
	wait_skb_queue_empty(&dev->done);
	netif_dbg(dev, ifdown, dev->net,
		  "waited for %d urb completions\n", temp);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dev->wait, &wait);
}
795*4882a593Smuzhiyun 
/* ndo_stop: quiesce the device.  Stops the queue, lets the minidriver
 * shut down, cancels all URBs and deferred work, then rebalances the
 * runtime-PM reference taken here (and the one held since open when
 * manage_power is in use).  Always returns 0.
 */
int usbnet_stop (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	const struct driver_info *info = dev->driver_info;
	int			retval, pm, mpn;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue (net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* to not race resume */
	pm = usb_autopm_get_interface(dev->intf);
	/* allow minidriver to stop correctly (wireless devices to turn off
	 * radio etc) */
	if (info->stop) {
		retval = info->stop(dev);
		if (retval < 0)
			netif_info(dev, ifdown, dev->net,
				   "stop fail (%d) usbnet usb-%s-%s, %s\n",
				   retval,
				   dev->udev->bus->bus_name, dev->udev->devpath,
				   info->description);
	}

	if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
		usbnet_terminate_urbs(dev);

	usbnet_status_stop(dev);

	usbnet_purge_paused_rxq(dev);

	/* mpn: true unless open() disabled runtime PM after a
	 * manage_power(dev, 1) failure */
	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);

	/* deferred work (timer, softirq, task) must also stop */
	dev->flags = 0;
	del_timer_sync (&dev->delay);
	tasklet_kill (&dev->bh);
	cancel_work_sync(&dev->kevent);
	/* drop the reference taken above only if the get succeeded (pm == 0) */
	if (!pm)
		usb_autopm_put_interface(dev->intf);

	/* release the PM reference held since open: either hand control to
	 * the minidriver's manage_power(dev, 0), or put it directly */
	if (info->manage_power && mpn)
		info->manage_power(dev, 0);
	else
		usb_autopm_put_interface(dev->intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_stop);
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun // posts reads, and enables write queuing
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun // precondition: never called in_interrupt
855*4882a593Smuzhiyun 
/* ndo_open: resume the interface, run minidriver reset/connect checks,
 * start the status interrupt pipe, then enable the tx queue and kick
 * the bottom half to post reads.  Must be called in process context.
 * Returns 0 on success or a negative errno; on failure the runtime-PM
 * reference taken here is dropped on the way out.
 */
int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	const struct driver_info *info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	/* hard_mtu or rx_urb_size may change in reset() */
	usbnet_update_max_qlen(dev);

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usbnet_status_start(dev, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	/* reset rx error state */
	dev->pkt_cnt = 0;
	dev->pkt_err = 0;
	clear_bit(EVENT_RX_KILL, &dev->flags);

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	/* if the minidriver manages power, a manage_power() failure just
	 * pins the device awake (EVENT_NO_RUNTIME_PM) rather than failing
	 * the open; on success the PM reference is released here and
	 * reclaimed in usbnet_stop() */
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0) {
			retval = 0;
			set_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}
	return retval;
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
EXPORT_SYMBOL_GPL(usbnet_open);
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
940*4882a593Smuzhiyun 
941*4882a593Smuzhiyun /* ethtool methods; minidrivers may need to add some more, but
942*4882a593Smuzhiyun  * they'll probably want to use this base set.
943*4882a593Smuzhiyun  */
944*4882a593Smuzhiyun 
usbnet_get_link_ksettings(struct net_device * net,struct ethtool_link_ksettings * cmd)945*4882a593Smuzhiyun int usbnet_get_link_ksettings(struct net_device *net,
946*4882a593Smuzhiyun 			      struct ethtool_link_ksettings *cmd)
947*4882a593Smuzhiyun {
948*4882a593Smuzhiyun 	struct usbnet *dev = netdev_priv(net);
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun 	if (!dev->mii.mdio_read)
951*4882a593Smuzhiyun 		return -EOPNOTSUPP;
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	mii_ethtool_get_link_ksettings(&dev->mii, cmd);
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 	return 0;
956*4882a593Smuzhiyun }
957*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_get_link_ksettings);
958*4882a593Smuzhiyun 
usbnet_set_link_ksettings(struct net_device * net,const struct ethtool_link_ksettings * cmd)959*4882a593Smuzhiyun int usbnet_set_link_ksettings(struct net_device *net,
960*4882a593Smuzhiyun 			      const struct ethtool_link_ksettings *cmd)
961*4882a593Smuzhiyun {
962*4882a593Smuzhiyun 	struct usbnet *dev = netdev_priv(net);
963*4882a593Smuzhiyun 	int retval;
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun 	if (!dev->mii.mdio_write)
966*4882a593Smuzhiyun 		return -EOPNOTSUPP;
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	retval = mii_ethtool_set_link_ksettings(&dev->mii, cmd);
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun 	/* link speed/duplex might have changed */
971*4882a593Smuzhiyun 	if (dev->driver_info->link_reset)
972*4882a593Smuzhiyun 		dev->driver_info->link_reset(dev);
973*4882a593Smuzhiyun 
974*4882a593Smuzhiyun 	/* hard_mtu or rx_urb_size may change in link_reset() */
975*4882a593Smuzhiyun 	usbnet_update_max_qlen(dev);
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun 	return retval;
978*4882a593Smuzhiyun }
979*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
980*4882a593Smuzhiyun 
usbnet_get_stats64(struct net_device * net,struct rtnl_link_stats64 * stats)981*4882a593Smuzhiyun void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
982*4882a593Smuzhiyun {
983*4882a593Smuzhiyun 	struct usbnet *dev = netdev_priv(net);
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 	netdev_stats_to_stats64(stats, &net->stats);
986*4882a593Smuzhiyun 	dev_fetch_sw_netstats(stats, dev->stats64);
987*4882a593Smuzhiyun }
988*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_get_stats64);
989*4882a593Smuzhiyun 
usbnet_get_link(struct net_device * net)990*4882a593Smuzhiyun u32 usbnet_get_link (struct net_device *net)
991*4882a593Smuzhiyun {
992*4882a593Smuzhiyun 	struct usbnet *dev = netdev_priv(net);
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	/* If a check_connect is defined, return its result */
995*4882a593Smuzhiyun 	if (dev->driver_info->check_connect)
996*4882a593Smuzhiyun 		return dev->driver_info->check_connect (dev) == 0;
997*4882a593Smuzhiyun 
998*4882a593Smuzhiyun 	/* if the device has mii operations, use those */
999*4882a593Smuzhiyun 	if (dev->mii.mdio_read)
1000*4882a593Smuzhiyun 		return mii_link_ok(&dev->mii);
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	/* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
1003*4882a593Smuzhiyun 	return ethtool_op_get_link(net);
1004*4882a593Smuzhiyun }
1005*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_get_link);
1006*4882a593Smuzhiyun 
usbnet_nway_reset(struct net_device * net)1007*4882a593Smuzhiyun int usbnet_nway_reset(struct net_device *net)
1008*4882a593Smuzhiyun {
1009*4882a593Smuzhiyun 	struct usbnet *dev = netdev_priv(net);
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	if (!dev->mii.mdio_write)
1012*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	return mii_nway_restart(&dev->mii);
1015*4882a593Smuzhiyun }
1016*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_nway_reset);
1017*4882a593Smuzhiyun 
usbnet_get_drvinfo(struct net_device * net,struct ethtool_drvinfo * info)1018*4882a593Smuzhiyun void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
1019*4882a593Smuzhiyun {
1020*4882a593Smuzhiyun 	struct usbnet *dev = netdev_priv(net);
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun 	strlcpy (info->driver, dev->driver_name, sizeof info->driver);
1023*4882a593Smuzhiyun 	strlcpy (info->fw_version, dev->driver_info->description,
1024*4882a593Smuzhiyun 		sizeof info->fw_version);
1025*4882a593Smuzhiyun 	usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
1026*4882a593Smuzhiyun }
1027*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
1028*4882a593Smuzhiyun 
usbnet_get_msglevel(struct net_device * net)1029*4882a593Smuzhiyun u32 usbnet_get_msglevel (struct net_device *net)
1030*4882a593Smuzhiyun {
1031*4882a593Smuzhiyun 	struct usbnet *dev = netdev_priv(net);
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun 	return dev->msg_enable;
1034*4882a593Smuzhiyun }
1035*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
1036*4882a593Smuzhiyun 
/* ethtool set_msglevel: store the new netif message-enable bitmap. */
void usbnet_set_msglevel (struct net_device *net, u32 level)
{
	struct usbnet *priv = netdev_priv(net);

	priv->msg_enable = level;
}
EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun /* drivers may override default ethtool_ops in their bind() routine */
1046*4882a593Smuzhiyun static const struct ethtool_ops usbnet_ethtool_ops = {
1047*4882a593Smuzhiyun 	.get_link		= usbnet_get_link,
1048*4882a593Smuzhiyun 	.nway_reset		= usbnet_nway_reset,
1049*4882a593Smuzhiyun 	.get_drvinfo		= usbnet_get_drvinfo,
1050*4882a593Smuzhiyun 	.get_msglevel		= usbnet_get_msglevel,
1051*4882a593Smuzhiyun 	.set_msglevel		= usbnet_set_msglevel,
1052*4882a593Smuzhiyun 	.get_ts_info		= ethtool_op_get_ts_info,
1053*4882a593Smuzhiyun 	.get_link_ksettings	= usbnet_get_link_ksettings,
1054*4882a593Smuzhiyun 	.set_link_ksettings	= usbnet_set_link_ksettings,
1055*4882a593Smuzhiyun };
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1058*4882a593Smuzhiyun 
__handle_link_change(struct usbnet * dev)1059*4882a593Smuzhiyun static void __handle_link_change(struct usbnet *dev)
1060*4882a593Smuzhiyun {
1061*4882a593Smuzhiyun 	if (!test_bit(EVENT_DEV_OPEN, &dev->flags))
1062*4882a593Smuzhiyun 		return;
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	if (!netif_carrier_ok(dev->net)) {
1065*4882a593Smuzhiyun 		/* kill URBs for reading packets to save bus bandwidth */
1066*4882a593Smuzhiyun 		unlink_urbs(dev, &dev->rxq);
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 		/*
1069*4882a593Smuzhiyun 		 * tx_timeout will unlink URBs for sending packets and
1070*4882a593Smuzhiyun 		 * tx queue is stopped by netcore after link becomes off
1071*4882a593Smuzhiyun 		 */
1072*4882a593Smuzhiyun 	} else {
1073*4882a593Smuzhiyun 		/* submitting URBs for reading packets */
1074*4882a593Smuzhiyun 		tasklet_schedule(&dev->bh);
1075*4882a593Smuzhiyun 	}
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	/* hard_mtu or rx_urb_size may change during link change */
1078*4882a593Smuzhiyun 	usbnet_update_max_qlen(dev);
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	clear_bit(EVENT_LINK_CHANGE, &dev->flags);
1081*4882a593Smuzhiyun }
1082*4882a593Smuzhiyun 
usbnet_set_rx_mode(struct net_device * net)1083*4882a593Smuzhiyun void usbnet_set_rx_mode(struct net_device *net)
1084*4882a593Smuzhiyun {
1085*4882a593Smuzhiyun 	struct usbnet		*dev = netdev_priv(net);
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
1088*4882a593Smuzhiyun }
1089*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_set_rx_mode);
1090*4882a593Smuzhiyun 
__handle_set_rx_mode(struct usbnet * dev)1091*4882a593Smuzhiyun static void __handle_set_rx_mode(struct usbnet *dev)
1092*4882a593Smuzhiyun {
1093*4882a593Smuzhiyun 	if (dev->driver_info->set_rx_mode)
1094*4882a593Smuzhiyun 		(dev->driver_info->set_rx_mode)(dev);
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	clear_bit(EVENT_SET_RX_MODE, &dev->flags);
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun /* work that cannot be done in interrupt context uses keventd.
1100*4882a593Smuzhiyun  *
1101*4882a593Smuzhiyun  * NOTE:  with 2.5 we could do more of this using completion callbacks,
1102*4882a593Smuzhiyun  * especially now that control transfers can be queued.
1103*4882a593Smuzhiyun  */
1104*4882a593Smuzhiyun static void
usbnet_deferred_kevent(struct work_struct * work)1105*4882a593Smuzhiyun usbnet_deferred_kevent (struct work_struct *work)
1106*4882a593Smuzhiyun {
1107*4882a593Smuzhiyun 	struct usbnet		*dev =
1108*4882a593Smuzhiyun 		container_of(work, struct usbnet, kevent);
1109*4882a593Smuzhiyun 	int			status;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	/* usb_clear_halt() needs a thread context */
1112*4882a593Smuzhiyun 	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
1113*4882a593Smuzhiyun 		unlink_urbs (dev, &dev->txq);
1114*4882a593Smuzhiyun 		status = usb_autopm_get_interface(dev->intf);
1115*4882a593Smuzhiyun 		if (status < 0)
1116*4882a593Smuzhiyun 			goto fail_pipe;
1117*4882a593Smuzhiyun 		status = usb_clear_halt (dev->udev, dev->out);
1118*4882a593Smuzhiyun 		usb_autopm_put_interface(dev->intf);
1119*4882a593Smuzhiyun 		if (status < 0 &&
1120*4882a593Smuzhiyun 		    status != -EPIPE &&
1121*4882a593Smuzhiyun 		    status != -ESHUTDOWN) {
1122*4882a593Smuzhiyun 			if (netif_msg_tx_err (dev))
1123*4882a593Smuzhiyun fail_pipe:
1124*4882a593Smuzhiyun 				netdev_err(dev->net, "can't clear tx halt, status %d\n",
1125*4882a593Smuzhiyun 					   status);
1126*4882a593Smuzhiyun 		} else {
1127*4882a593Smuzhiyun 			clear_bit (EVENT_TX_HALT, &dev->flags);
1128*4882a593Smuzhiyun 			if (status != -ESHUTDOWN)
1129*4882a593Smuzhiyun 				netif_wake_queue (dev->net);
1130*4882a593Smuzhiyun 		}
1131*4882a593Smuzhiyun 	}
1132*4882a593Smuzhiyun 	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
1133*4882a593Smuzhiyun 		unlink_urbs (dev, &dev->rxq);
1134*4882a593Smuzhiyun 		status = usb_autopm_get_interface(dev->intf);
1135*4882a593Smuzhiyun 		if (status < 0)
1136*4882a593Smuzhiyun 			goto fail_halt;
1137*4882a593Smuzhiyun 		status = usb_clear_halt (dev->udev, dev->in);
1138*4882a593Smuzhiyun 		usb_autopm_put_interface(dev->intf);
1139*4882a593Smuzhiyun 		if (status < 0 &&
1140*4882a593Smuzhiyun 		    status != -EPIPE &&
1141*4882a593Smuzhiyun 		    status != -ESHUTDOWN) {
1142*4882a593Smuzhiyun 			if (netif_msg_rx_err (dev))
1143*4882a593Smuzhiyun fail_halt:
1144*4882a593Smuzhiyun 				netdev_err(dev->net, "can't clear rx halt, status %d\n",
1145*4882a593Smuzhiyun 					   status);
1146*4882a593Smuzhiyun 		} else {
1147*4882a593Smuzhiyun 			clear_bit (EVENT_RX_HALT, &dev->flags);
1148*4882a593Smuzhiyun 			tasklet_schedule (&dev->bh);
1149*4882a593Smuzhiyun 		}
1150*4882a593Smuzhiyun 	}
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 	/* tasklet could resubmit itself forever if memory is tight */
1153*4882a593Smuzhiyun 	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
1154*4882a593Smuzhiyun 		struct urb	*urb = NULL;
1155*4882a593Smuzhiyun 		int resched = 1;
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 		if (netif_running (dev->net))
1158*4882a593Smuzhiyun 			urb = usb_alloc_urb (0, GFP_KERNEL);
1159*4882a593Smuzhiyun 		else
1160*4882a593Smuzhiyun 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
1161*4882a593Smuzhiyun 		if (urb != NULL) {
1162*4882a593Smuzhiyun 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
1163*4882a593Smuzhiyun 			status = usb_autopm_get_interface(dev->intf);
1164*4882a593Smuzhiyun 			if (status < 0) {
1165*4882a593Smuzhiyun 				usb_free_urb(urb);
1166*4882a593Smuzhiyun 				goto fail_lowmem;
1167*4882a593Smuzhiyun 			}
1168*4882a593Smuzhiyun 			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
1169*4882a593Smuzhiyun 				resched = 0;
1170*4882a593Smuzhiyun 			usb_autopm_put_interface(dev->intf);
1171*4882a593Smuzhiyun fail_lowmem:
1172*4882a593Smuzhiyun 			if (resched)
1173*4882a593Smuzhiyun 				tasklet_schedule (&dev->bh);
1174*4882a593Smuzhiyun 		}
1175*4882a593Smuzhiyun 	}
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
1178*4882a593Smuzhiyun 		const struct driver_info *info = dev->driver_info;
1179*4882a593Smuzhiyun 		int			retval = 0;
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 		clear_bit (EVENT_LINK_RESET, &dev->flags);
1182*4882a593Smuzhiyun 		status = usb_autopm_get_interface(dev->intf);
1183*4882a593Smuzhiyun 		if (status < 0)
1184*4882a593Smuzhiyun 			goto skip_reset;
1185*4882a593Smuzhiyun 		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
1186*4882a593Smuzhiyun 			usb_autopm_put_interface(dev->intf);
1187*4882a593Smuzhiyun skip_reset:
1188*4882a593Smuzhiyun 			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
1189*4882a593Smuzhiyun 				    retval,
1190*4882a593Smuzhiyun 				    dev->udev->bus->bus_name,
1191*4882a593Smuzhiyun 				    dev->udev->devpath,
1192*4882a593Smuzhiyun 				    info->description);
1193*4882a593Smuzhiyun 		} else {
1194*4882a593Smuzhiyun 			usb_autopm_put_interface(dev->intf);
1195*4882a593Smuzhiyun 		}
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 		/* handle link change from link resetting */
1198*4882a593Smuzhiyun 		__handle_link_change(dev);
1199*4882a593Smuzhiyun 	}
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
1202*4882a593Smuzhiyun 		__handle_link_change(dev);
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
1205*4882a593Smuzhiyun 		__handle_set_rx_mode(dev);
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	if (dev->flags)
1209*4882a593Smuzhiyun 		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
1210*4882a593Smuzhiyun }
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1213*4882a593Smuzhiyun 
/* URB completion handler for transmits (may run in hard-irq context).
 * On success, account the packet/byte counts recorded in the skb's
 * skb_data; on error, classify the status and either defer an endpoint
 * reset, throttle transmits, or just count the error.  In every case
 * the skb is handed to defer_bh() for tx_done cleanup and the async PM
 * reference taken at submit time is released.
 */
static void tx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&stats64->syncp);
		stats64->tx_packets += entry->packets;
		stats64->tx_bytes += entry->length;
		u64_stats_update_end_irqrestore(&stats64->syncp, flags);
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		/* stall: needs usb_clear_halt() from thread context */
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		/* like rx, tx gets controller i/o faults during hub_wq
		 * delays and so it uses the same throttling mechanism.
		 */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			usb_mark_last_busy(dev->udev);
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				netif_dbg(dev, link, dev->net,
					  "tx throttle %d\n", urb->status);
			}
			/* queue restarts when dev->delay fires */
			netif_stop_queue (dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);
	(void) defer_bh(dev, skb, &dev->txq, tx_done);
}
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1268*4882a593Smuzhiyun 
usbnet_tx_timeout(struct net_device * net,unsigned int txqueue)1269*4882a593Smuzhiyun void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
1270*4882a593Smuzhiyun {
1271*4882a593Smuzhiyun 	struct usbnet		*dev = netdev_priv(net);
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	unlink_urbs (dev, &dev->txq);
1274*4882a593Smuzhiyun 	tasklet_schedule (&dev->bh);
1275*4882a593Smuzhiyun 	/* this needs to be handled individually because the generic layer
1276*4882a593Smuzhiyun 	 * doesn't know what is sufficient and could not restore private
1277*4882a593Smuzhiyun 	 * information if a remedy of an unconditional reset were used.
1278*4882a593Smuzhiyun 	 */
1279*4882a593Smuzhiyun 	if (dev->driver_info->recover)
1280*4882a593Smuzhiyun 		(dev->driver_info->recover)(dev);
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1285*4882a593Smuzhiyun 
build_dma_sg(const struct sk_buff * skb,struct urb * urb)1286*4882a593Smuzhiyun static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
1287*4882a593Smuzhiyun {
1288*4882a593Smuzhiyun 	unsigned num_sgs, total_len = 0;
1289*4882a593Smuzhiyun 	int i, s = 0;
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	num_sgs = skb_shinfo(skb)->nr_frags + 1;
1292*4882a593Smuzhiyun 	if (num_sgs == 1)
1293*4882a593Smuzhiyun 		return 0;
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	/* reserve one for zero packet */
1296*4882a593Smuzhiyun 	urb->sg = kmalloc_array(num_sgs + 1, sizeof(struct scatterlist),
1297*4882a593Smuzhiyun 				GFP_ATOMIC);
1298*4882a593Smuzhiyun 	if (!urb->sg)
1299*4882a593Smuzhiyun 		return -ENOMEM;
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	urb->num_sgs = num_sgs;
1302*4882a593Smuzhiyun 	sg_init_table(urb->sg, urb->num_sgs + 1);
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
1305*4882a593Smuzhiyun 	total_len += skb_headlen(skb);
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1308*4882a593Smuzhiyun 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 		total_len += skb_frag_size(f);
1311*4882a593Smuzhiyun 		sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
1312*4882a593Smuzhiyun 			    skb_frag_off(f));
1313*4882a593Smuzhiyun 	}
1314*4882a593Smuzhiyun 	urb->transfer_buffer_length = total_len;
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 	return 1;
1317*4882a593Smuzhiyun }
1318*4882a593Smuzhiyun 
/* ndo_start_xmit: queue one skb for transmission on the device's bulk-out
 * endpoint.  Handles minidriver tx_fixup framing, short/zero-packet padding,
 * runtime-PM deferral while the device is asleep, and flow control of the
 * TX queue.  Always returns NETDEV_TX_OK; on failure the skb is dropped
 * and counted in tx_dropped rather than requeued.
 */
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	unsigned int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	const struct driver_info *info = dev->driver_info;
	unsigned long		flags;
	int retval;

	/* skb may be NULL here: FLAG_MULTI_PACKET minidrivers call this
	 * with NULL to flush an accumulated frame via tx_fixup().
	 */
	if (skb)
		skb_tx_timestamp(skb);

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			/* packet collected; minidriver waiting for more */
			if (info->flags & FLAG_MULTI_PACKET)
				goto not_drop;
			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
			goto drop;
		}
	}

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	/* per-skb bookkeeping lives in the skb control block */
	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);
	if (dev->can_dma_sg) {
		/* nonlinear skb: build a scatter-gather list instead of
		 * using the linear buffer set up above
		 */
		if (build_dma_sg(skb, urb) < 0)
			goto drop;
	}
	length = urb->transfer_buffer_length;

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				/* pad by one byte so the transfer is not an
				 * exact multiple of maxpacket; use tailroom
				 * for linear skbs, the reserved SG slot and
				 * padding_pkt otherwise
				 */
				length++;
				if (skb_tailroom(skb) && !urb->num_sgs) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				} else if (urb->num_sgs)
					sg_set_buf(&urb->sg[urb->num_sgs++],
							dev->padding_pkt, 1);
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}
	urb->transfer_buffer_length = length;

	if (info->flags & FLAG_MULTI_PACKET) {
		/* Driver has set number of packets and a length delta.
		 * Calculate the complete length and ensure that it's
		 * positive.
		 */
		entry->length += length;
		if (WARN_ON_ONCE(entry->length <= 0))
			entry->length = length;
	} else {
		usbnet_set_skb_tx_stats(skb, 1, length);
	}

	/* txq.lock also serializes against suspend/resume touching the
	 * deferred anchor and EVENT_DEV_ASLEEP below
	 */
	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}
	if (netif_queue_stopped(net)) {
		usb_autopm_put_interface_async(dev->intf);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		/* endpoint stalled; clear the halt from process context */
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		__usbnet_queue_skb(&dev->txq, skb, tx_start);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		if (urb) {
			kfree(urb->sg);
			usb_free_urb(urb);
		}
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %u, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);
1463*4882a593Smuzhiyun 
/* Allocate and submit fresh RX urbs, at most ten per invocation and
 * stopping as soon as the RX queue is full.  Returns 0 on success or
 * the first allocation/submission error encountered.
 */
static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
{
	int ret = 0;
	int count;

	/* don't refill the queue all at once */
	for (count = 0; count < 10; count++) {
		struct urb *urb;

		if (dev->rxq.qlen >= RX_QLEN(dev))
			break;

		urb = usb_alloc_urb(0, flags);
		if (!urb) {
			ret = -ENOMEM;
			break;
		}

		ret = rx_submit(dev, urb, flags);
		if (ret)
			break;
	}

	return ret;
}
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun // tasklet (work deferred from completions, in_irq) or timer
1489*4882a593Smuzhiyun 
/* Bottom half shared by the tasklet and the throttle timer: drain the
 * "done" queue of completed urbs, then either wake anyone waiting for
 * quiescence or top up the RX ring and re-enable the TX queue.
 */
static void usbnet_bh (struct timer_list *t)
{
	struct usbnet		*dev = from_timer(dev, t, delay);
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			/* hand the received frame up the stack; rx_process
			 * consumes the skb
			 */
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
			/* free the SG list built for this transmit ... */
			kfree(entry->urb->sg);
			fallthrough;
		case rx_cleanup:
			/* ... then release urb and skb for both paths */
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	/* restart RX again after disabling due to high error rate */
	clear_bit(EVENT_RX_KILL, &dev->flags);

	/* waiting for all pending urbs to complete?
	 * only then can we forgo submitting anew
	 */
	if (waitqueue_active(&dev->wait)) {
		if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
			wake_up_all(&dev->wait);

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   netif_carrier_ok(dev->net) &&
		   !timer_pending(&dev->delay) &&
		   !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
		   !test_bit(EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;

		if (temp < RX_QLEN(dev)) {
			/* -ENOLINK means the device went away; give up */
			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
				return;
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			/* still short: reschedule ourselves to keep filling */
			if (dev->rxq.qlen < RX_QLEN(dev))
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}
1548*4882a593Smuzhiyun 
/* Tasklet entry point.  The tasklet was armed with the address of
 * dev->delay (see usbnet_probe), so recover that timer_list pointer
 * and run the common bottom-half handler.
 */
static void usbnet_bh_tasklet(unsigned long data)
{
	usbnet_bh((struct timer_list *)data);
}
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun /*-------------------------------------------------------------------------
1558*4882a593Smuzhiyun  *
1559*4882a593Smuzhiyun  * USB Device Driver support
1560*4882a593Smuzhiyun  *
1561*4882a593Smuzhiyun  *-------------------------------------------------------------------------*/
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun // precondition: never called in_interrupt
1564*4882a593Smuzhiyun 
/* USB disconnect handler: tear down the network interface and release
 * every resource acquired in usbnet_probe().  The order matters:
 * unregister the netdev first (stops new I/O), drop deferred TX urbs,
 * let the minidriver unbind, then free the interrupt urb and the
 * remaining allocations.
 */
void usbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;
	struct urb		*urb;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
		   intf->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	/* free TX urbs that were parked on the deferred anchor while the
	 * device was suspended; their skbs are the urb contexts
	 */
	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		dev_kfree_skb(urb->context);
		kfree(urb->sg);
		usb_free_urb(urb);
	}

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	usb_kill_urb(dev->interrupt);
	usb_free_urb(dev->interrupt);
	kfree(dev->padding_pkt);

	free_percpu(dev->stats64);
	free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
1604*4882a593Smuzhiyun 
/* net_device callbacks shared by all usbnet-based minidrivers */
static const struct net_device_ops usbnet_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_set_rx_mode	= usbnet_set_rx_mode,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_get_stats64	= usbnet_get_stats64,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun // precondition: never called in_interrupt
1620*4882a593Smuzhiyun 
/* device types exposed via sysfs so userspace can tell WLAN and WWAN
 * interfaces apart from plain Ethernet ones (see SET_NETDEV_DEVTYPE
 * in usbnet_probe)
 */
static struct device_type wlan_type = {
	.name	= "wlan",
};

static struct device_type wwan_type = {
	.name	= "wwan",
};
1628*4882a593Smuzhiyun 
/* USB probe handler: allocate and initialize a usbnet device for the
 * newly attached interface, run the minidriver's bind()/endpoint setup,
 * and register the resulting network device.  Returns 0 on success or
 * a negative errno; on failure everything is unwound via the out*
 * label ladder in reverse order of acquisition.
 */
int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	const struct driver_info	*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	struct usb_driver 	*driver = to_usb_driver(udev->dev.driver);

	/* usbnet already took usb runtime pm, so have to enable the feature
	 * for usb interface, otherwise usb_autopm_get_interface may return
	 * failure if RUNTIME_PM is enabled.
	 */
	if (!driver->supports_autosuspend) {
		driver->supports_autosuspend = 1;
		pm_runtime_enable(&udev->dev);
	}

	name = udev->dev.driver->name;
	info = (const struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		goto out;

	/* netdev_printk() needs this so do it as early as possible */
	SET_NETDEV_DEV(net, &udev->dev);

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;

	dev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->stats64)
		goto out0;

	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	init_waitqueue_head(&dev->wait);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	/* tasklet and timer share usbnet_bh(); the tasklet gets the timer's
	 * address as its data so both paths converge on the same handler
	 */
	dev->bh.func = usbnet_bh_tasklet;
	dev->bh.data = (unsigned long)&dev->delay;
	INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
	init_usb_anchor(&dev->deferred);
	timer_setup(&dev->delay, usbnet_bh, 0);
	mutex_init (&dev->phy_mutex);
	mutex_init(&dev->interrupt_mutex);
	dev->interrupt_count = 0;

	dev->net = net;
	strcpy (net->name, "usb%d");
	/* start from the module-global random MAC; bind() may overwrite it */
	memcpy (net->dev_addr, node_id, sizeof node_id);

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
	net->min_mtu = 0;
	net->max_mtu = ETH_MAX_MTU;

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
		     (net->dev_addr [0] & 0x02) == 0))
			strcpy (net->name, "eth%d");
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strcpy(net->name, "wlan%d");
		/* WWAN devices should always be named "wwan%d" */
		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
			strcpy(net->name, "wwan%d");

		/* devices that cannot do ARP */
		if ((dev->driver_info->flags & FLAG_NOARP) != 0)
			net->flags |= IFF_NOARP;

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		/* endpoints are fixed by the driver_info table */
		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;

	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
	if (dev->maxpacket == 0) {
		/* that is a broken device */
		status = -ENODEV;
		goto out4;
	}

	/* let userspace know we have a random address */
	if (ether_addr_equal(net->dev_addr, node_id))
		net->addr_assign_type = NET_ADDR_RANDOM;

	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wlan_type);
	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wwan_type);

	/* initialize max rx_qlen and tx_qlen */
	usbnet_update_max_qlen(dev);

	/* SG-capable devices that pad short transfers need a one-byte
	 * buffer to point the extra scatterlist entry at
	 */
	if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
		!(info->flags & FLAG_MULTI_PACKET)) {
		dev->padding_pkt = kzalloc(1, GFP_KERNEL);
		if (!dev->padding_pkt) {
			status = -ENOMEM;
			goto out4;
		}
	}

	status = register_netdev (net);
	if (status)
		goto out5;
	netif_info(dev, probe, dev->net,
		   "register '%s' at usb-%s-%s, %s, %pM\n",
		   udev->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description,
		   net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	netif_device_attach (net);

	if (dev->driver_info->flags & FLAG_LINK_INTR)
		usbnet_link_change(dev, 0, 0);

	return 0;

out5:
	kfree(dev->padding_pkt);
out4:
	usb_free_urb(dev->interrupt);
out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	/* subdrivers must undo all they did in bind() if they
	 * fail it, but we may fail later and a deferred kevent
	 * may trigger an error resubmitting itself and, worse,
	 * schedule a timer. So we kill it all just in case.
	 */
	cancel_work_sync(&dev->kevent);
	del_timer_sync(&dev->delay);
	free_percpu(dev->stats64);
out0:
	free_netdev(net);
out:
	return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun /*
1832*4882a593Smuzhiyun  * suspend the whole driver as soon as the first interface is suspended
1833*4882a593Smuzhiyun  * resume only when the last interface is resumed
1834*4882a593Smuzhiyun  */
1835*4882a593Smuzhiyun 
/* Suspend handler (both system sleep and runtime PM).  Only the first
 * suspend of a shared device does real work; suspend_count tracks how
 * many interfaces have suspended.  Autosuspend is refused with -EBUSY
 * while transmits are pending.
 */
int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
			dev->suspend_count--;
			spin_unlock_irq(&dev->txq.lock);
			return -EBUSY;
		} else {
			/* set under txq.lock so usbnet_start_xmit sees it
			 * consistently and defers further transmits
			 */
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		usbnet_terminate_urbs(dev);
		__usbnet_status_stop_force(dev);

		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_suspend);
1868*4882a593Smuzhiyun 
/* Resume handler: the last interface to resume restarts the status urb,
 * submits every TX urb that was deferred while asleep, clears the
 * EVENT_DEV_ASLEEP flag, and refills the RX ring if the device is open.
 */
int usbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);
	struct sk_buff          *skb;
	struct urb              *res;
	int                     retval;

	if (!--dev->suspend_count) {
		/* resume interrupt URB if it was previously submitted */
		__usbnet_status_start_force(dev, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {

			skb = (struct sk_buff *)res->context;
			retval = usb_submit_urb(res, GFP_ATOMIC);
			if (retval < 0) {
				/* submit failed: drop skb/urb and release the
				 * PM reference taken at xmit time
				 */
				dev_kfree_skb_any(skb);
				kfree(res->sg);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				__skb_queue_tail(&dev->txq, skb);
			}
		}

		/* order the queued submissions before clearing ASLEEP */
		smp_mb();
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* handle remote wakeup ASAP
			 * we cannot race against stop
			 */
			if (netif_device_present(dev->net) &&
				!timer_pending(&dev->delay) &&
				!test_bit(EVENT_RX_HALT, &dev->flags))
					rx_alloc_submit(dev, GFP_NOIO);

			if (!(dev->txq.qlen >= TX_QLEN(dev)))
				netif_tx_wake_all_queues(dev->net);
			tasklet_schedule (&dev->bh);
		}
	}

	/* rebalance the PM reference dropped in usbnet_device_suggests_idle */
	if (test_and_clear_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags))
		usb_autopm_get_interface_no_resume(intf);

	return 0;
}
EXPORT_SYMBOL_GPL(usbnet_resume);
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun /*
1923*4882a593Smuzhiyun  * Either a subdriver implements manage_power, then it is assumed to always
1924*4882a593Smuzhiyun  * be ready to be suspended or it reports the readiness to be suspended
1925*4882a593Smuzhiyun  * explicitly
1926*4882a593Smuzhiyun  */
usbnet_device_suggests_idle(struct usbnet * dev)1927*4882a593Smuzhiyun void usbnet_device_suggests_idle(struct usbnet *dev)
1928*4882a593Smuzhiyun {
1929*4882a593Smuzhiyun 	if (!test_and_set_bit(EVENT_DEVICE_REPORT_IDLE, &dev->flags)) {
1930*4882a593Smuzhiyun 		dev->intf->needs_remote_wakeup = 1;
1931*4882a593Smuzhiyun 		usb_autopm_put_interface_async(dev->intf);
1932*4882a593Smuzhiyun 	}
1933*4882a593Smuzhiyun }
1934*4882a593Smuzhiyun EXPORT_SYMBOL(usbnet_device_suggests_idle);
1935*4882a593Smuzhiyun 
/*
 * For devices that can do without special commands
 */
int usbnet_manage_power(struct usbnet *dev, int on)
{
	/* only remote wakeup needs toggling; no device commands required */
	dev->intf->needs_remote_wakeup = on;
	return 0;
}
EXPORT_SYMBOL(usbnet_manage_power);
1945*4882a593Smuzhiyun 
/*
 * usbnet_link_change - report a link state transition to the stack
 * @link: true if the device reports link up
 * @need_reset: true if the link must be reset before it is usable
 *
 * Carrier is raised only once the link is up with no reset pending;
 * the matching kevent (LINK_RESET or LINK_CHANGE) is deferred to the
 * driver workqueue.
 */
void usbnet_link_change(struct usbnet *dev, bool link, bool need_reset)
{
	bool reset_first = link && need_reset;

	/* update link after link is reset */
	if (reset_first || !link)
		netif_carrier_off(dev->net);
	else
		netif_carrier_on(dev->net);

	usbnet_defer_kevent(dev, reset_first ? EVENT_LINK_RESET
					     : EVENT_LINK_CHANGE);
}
EXPORT_SYMBOL(usbnet_link_change);
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
/*
 * Synchronous control-IN helper (no runtime-PM handling).
 *
 * Allocates a bounce buffer (usb_control_msg cannot take stack or user
 * memory), issues the request, and copies up to @size received bytes
 * into @data.  Returns the number of bytes transferred or a negative
 * errno.  Caller must hold a PM reference; use usbnet_read_cmd() for
 * the autopm-aware variant.
 */
static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			     u16 value, u16 index, void *data, u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (size) {
		buf = kmalloc(size, GFP_NOIO);
		if (!buf)
			goto out;
	}

	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_GET_TIMEOUT);
	if (err > 0 && err <= size) {
		if (data)
			memcpy(data, buf, err);
		else
			netdev_dbg(dev->net,
				   "Huh? Data requested but thrown away.\n");
	}
	kfree(buf);
out:
	return err;
}
1992*4882a593Smuzhiyun 
/*
 * Synchronous control-OUT helper (no runtime-PM handling).
 *
 * Duplicates @data into a kernel bounce buffer (usb_control_msg needs
 * DMA-able memory) and issues the request.  Requesting a non-zero
 * @size with a NULL @data is a caller bug: warn once and fail with
 * -EINVAL.  Returns bytes transferred or a negative errno.  Caller
 * must hold a PM reference; use usbnet_write_cmd() otherwise.
 */
static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
			      u16 value, u16 index, const void *data,
			      u16 size)
{
	void *buf = NULL;
	int err = -ENOMEM;

	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	if (data) {
		buf = kmemdup(data, size, GFP_NOIO);
		if (!buf)
			goto out;
	} else {
		if (size) {
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}
	}

	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      cmd, reqtype, value, index, buf, size,
			      USB_CTRL_SET_TIMEOUT);
	kfree(buf);

out:
	return err;
}
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun /*
2026*4882a593Smuzhiyun  * The function can't be called inside suspend/resume callback,
2027*4882a593Smuzhiyun  * otherwise deadlock will be caused.
2028*4882a593Smuzhiyun  */
usbnet_read_cmd(struct usbnet * dev,u8 cmd,u8 reqtype,u16 value,u16 index,void * data,u16 size)2029*4882a593Smuzhiyun int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
2030*4882a593Smuzhiyun 		    u16 value, u16 index, void *data, u16 size)
2031*4882a593Smuzhiyun {
2032*4882a593Smuzhiyun 	int ret;
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 	if (usb_autopm_get_interface(dev->intf) < 0)
2035*4882a593Smuzhiyun 		return -ENODEV;
2036*4882a593Smuzhiyun 	ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
2037*4882a593Smuzhiyun 				data, size);
2038*4882a593Smuzhiyun 	usb_autopm_put_interface(dev->intf);
2039*4882a593Smuzhiyun 	return ret;
2040*4882a593Smuzhiyun }
2041*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_read_cmd);
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun /*
2044*4882a593Smuzhiyun  * The function can't be called inside suspend/resume callback,
2045*4882a593Smuzhiyun  * otherwise deadlock will be caused.
2046*4882a593Smuzhiyun  */
usbnet_write_cmd(struct usbnet * dev,u8 cmd,u8 reqtype,u16 value,u16 index,const void * data,u16 size)2047*4882a593Smuzhiyun int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
2048*4882a593Smuzhiyun 		     u16 value, u16 index, const void *data, u16 size)
2049*4882a593Smuzhiyun {
2050*4882a593Smuzhiyun 	int ret;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	if (usb_autopm_get_interface(dev->intf) < 0)
2053*4882a593Smuzhiyun 		return -ENODEV;
2054*4882a593Smuzhiyun 	ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
2055*4882a593Smuzhiyun 				 data, size);
2056*4882a593Smuzhiyun 	usb_autopm_put_interface(dev->intf);
2057*4882a593Smuzhiyun 	return ret;
2058*4882a593Smuzhiyun }
2059*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usbnet_write_cmd);
2060*4882a593Smuzhiyun 
/*
 * The function can be called inside suspend/resume callback safely
 * and should only be called by suspend/resume callback generally.
 */
int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			  u16 value, u16 index, void *data, u16 size)
{
	/* no autopm get/put: the PM core already holds the device */
	return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
				 data, size);
}
EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);
2072*4882a593Smuzhiyun 
/*
 * The function can be called inside suspend/resume callback safely
 * and should only be called by suspend/resume callback generally.
 */
int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
			  u16 value, u16 index, const void *data,
			  u16 size)
{
	/* no autopm get/put: the PM core already holds the device */
	return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
				  data, size);
}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);
2085*4882a593Smuzhiyun 
usbnet_async_cmd_cb(struct urb * urb)2086*4882a593Smuzhiyun static void usbnet_async_cmd_cb(struct urb *urb)
2087*4882a593Smuzhiyun {
2088*4882a593Smuzhiyun 	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
2089*4882a593Smuzhiyun 	int status = urb->status;
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 	if (status < 0)
2092*4882a593Smuzhiyun 		dev_dbg(&urb->dev->dev, "%s failed with %d",
2093*4882a593Smuzhiyun 			__func__, status);
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	kfree(req);
2096*4882a593Smuzhiyun 	usb_free_urb(urb);
2097*4882a593Smuzhiyun }
2098*4882a593Smuzhiyun 
/*
 * The caller must make sure that device can't be put into suspend
 * state until the control URB completes.
 */
/*
 * Fire-and-forget control-OUT transfer, usable from atomic context
 * (all allocations are GFP_ATOMIC).  On success the setup packet, the
 * copied data buffer and the URB are all freed by the completion
 * handler / USB core; on failure they are freed here.  Returns 0 or a
 * negative errno.
 */
int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
			   u16 value, u16 index, const void *data, u16 size)
{
	struct usb_ctrlrequest *req;
	struct urb *urb;
	int err = -ENOMEM;
	void *buf = NULL;

	netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
		   " value=0x%04x index=0x%04x size=%d\n",
		   cmd, reqtype, value, index, size);

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto fail;

	if (data) {
		/* caller's buffer may go away; take a private copy */
		buf = kmemdup(data, size, GFP_ATOMIC);
		if (!buf) {
			netdev_err(dev->net, "Error allocating buffer"
				   " in %s!\n", __func__);
			goto fail_free_urb;
		}
	}

	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
	if (!req)
		goto fail_free_buf;

	/* wValue/wIndex/wLength are little-endian on the wire */
	req->bRequestType = reqtype;
	req->bRequest = cmd;
	req->wValue = cpu_to_le16(value);
	req->wIndex = cpu_to_le16(index);
	req->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, dev->udev,
			     usb_sndctrlpipe(dev->udev, 0),
			     (void *)req, buf, size,
			     usbnet_async_cmd_cb, req);
	/* let the USB core free buf when the urb completes */
	urb->transfer_flags |= URB_FREE_BUFFER;

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		netdev_err(dev->net, "Error submitting the control"
			   " message: status=%d\n", err);
		goto fail_free_all;
	}
	return 0;

fail_free_all:
	kfree(req);
fail_free_buf:
	kfree(buf);
	/*
	 * avoid a double free
	 * needed because the flag can be set only
	 * after filling the URB
	 */
	urb->transfer_flags = 0;
fail_free_urb:
	usb_free_urb(urb);
fail:
	return err;

}
EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
2169*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2170*4882a593Smuzhiyun 
/* Module init: sanity-check skb_data size and seed the fallback MAC. */
static int __init usbnet_init(void)
{
	/* Compiler should optimize this out. */
	BUILD_BUG_ON(
		sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data));

	/* random locally-administered address for devices without one */
	eth_random_addr(node_id);
	return 0;
}
module_init(usbnet_init);
2181*4882a593Smuzhiyun 
/* Module exit: nothing to tear down; per-device state goes with disconnect. */
static void __exit usbnet_exit(void)
{
}
module_exit(usbnet_exit);
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun MODULE_AUTHOR("David Brownell");
2188*4882a593Smuzhiyun MODULE_DESCRIPTION("USB network driver framework");
2189*4882a593Smuzhiyun MODULE_LICENSE("GPL");
2190