// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 */

#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include "mconsole_kern.h"
#include <net_kern.h>
#include <net_user.h>

#define DRIVER_NAME "uml-netdev"

static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

/*
 * The drop_skb is used when we can't allocate an skb.  The
 * packet is read into drop_skb in order to get the data off the
 * connection to the host.
 * It is reallocated whenever a maximum packet size is seen which is
 * larger than any seen before.  update_drop_skb is called from
 * eth_configure when a new interface is added.
 */
static DEFINE_SPINLOCK(drop_lock);
static struct sk_buff *drop_skb;
static int drop_max;

static int update_drop_skb(int max)
{
	struct sk_buff *new;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&drop_lock, flags);

	if (max <= drop_max)
		goto out;

	err = -ENOMEM;
	new = dev_alloc_skb(max);
	if (new == NULL)
		goto out;

	skb_put(new, max);

	kfree_skb(drop_skb);
	drop_skb = new;
	drop_max = max;
	err = 0;
out:
	spin_unlock_irqrestore(&drop_lock, flags);

	return err;
}

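/*
 * Read one packet from the host file descriptor.  Returns the packet
 * length, 0 if the packet was dropped because no skb could be allocated,
 * or a negative error code from the transport's read routine.
 */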
static int uml_net_rx(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int pkt_len;
	struct sk_buff *skb;

	/* If we can't allocate memory, try again next round. */
	skb = dev_alloc_skb(lp->max_packet);
	if (skb == NULL) {
		drop_skb->dev = dev;
		/* Read a packet into drop_skb and don't do anything with it. */
		(*lp->read)(lp->fd, drop_skb, lp);
		dev->stats.rx_dropped++;
		return 0;
	}

	skb->dev = dev;
	skb_put(skb, lp->max_packet);
	skb_reset_mac_header(skb);
	pkt_len = (*lp->read)(lp->fd, skb, lp);

	if (pkt_len > 0) {
		skb_trim(skb, pkt_len);
		skb->protocol = (*lp->protocol)(skb);

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;
		netif_rx(skb);
		return pkt_len;
	}

	kfree_skb(skb);
	return pkt_len;
}

static void uml_dev_close(struct work_struct *work)
{
	struct uml_net_private *lp =
		container_of(work, struct uml_net_private, work);
	dev_close(lp->dev);
}

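/*
 * SIGIO-driven receive interrupt.  Drains all packets pending on the host
 * side; on a read error the interface shutdown is deferred to a workqueue
 * because dev_close() can't be called from interrupt context.
 */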
static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return IRQ_NONE;

	spin_lock(&lp->lock);
	while ((err = uml_net_rx(dev)) > 0) ;
	if (err < 0) {
		printk(KERN_ERR
		       "Device '%s' read returned %d, shutting it down\n",
		       dev->name, err);
		/* dev_close can't be called in interrupt context, and takes
		 * again lp->lock.
		 * And dev_close() can be safely called multiple times on the
		 * same device, since it tests for (dev->flags & IFF_UP). So
		 * there's no harm in delaying the device shutdown.
		 * Furthermore, the workqueue will not re-enqueue an already
		 * enqueued work item. */
		schedule_work(&lp->work);
		goto out;
	}
out:
	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}

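/*
 * ndo_open: open the host side of the transport, request the read IRQ,
 * and drain anything already queued on the host before relying on SIGIOs.
 */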
static int uml_net_open(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (lp->fd >= 0) {
		err = -ENXIO;
		goto out;
	}

	lp->fd = (*lp->open)(&lp->user);
	if (lp->fd < 0) {
		err = lp->fd;
		goto out;
	}

	err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
			     IRQF_SHARED, dev->name, dev);
	if (err != 0) {
		printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}

	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here.  In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */
	while ((err = uml_net_rx(dev)) > 0) ;

	spin_lock(&opened_lock);
	list_add(&lp->list, &opened);
	spin_unlock(&opened_lock);

	return 0;
out_close:
	if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;
out:
	return err;
}

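/*
 * ndo_stop: free the IRQ, close the host descriptor and drop the device
 * from the list of opened interfaces.
 */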
static int uml_net_close(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	um_free_irq(dev->irq, dev);
	if (lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;

	spin_lock(&opened_lock);
	list_del(&lp->list);
	spin_unlock(&opened_lock);

	return 0;
}

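/*
 * ndo_start_xmit: write the frame to the host descriptor under lp->lock.
 * A zero-length write counts as a drop, any other short write is reported
 * as an error; the skb is always consumed.
 */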
static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	unsigned long flags;
	int len;

	netif_stop_queue(dev);

	spin_lock_irqsave(&lp->lock, flags);

	len = (*lp->write)(lp->fd, skb, lp);
	skb_tx_timestamp(skb);

	if (len == skb->len) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		netif_trans_update(dev);
		netif_start_queue(dev);

		/* this is normally done in the interrupt when tx finishes */
		netif_wake_queue(dev);
	}
	else if (len == 0) {
		netif_start_queue(dev);
		dev->stats.tx_dropped++;
	}
	else {
		netif_start_queue(dev);
		printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

static void uml_net_set_multicast_list(struct net_device *dev)
{
	return;
}

static void uml_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netif_trans_update(dev);
	netif_wake_queue(dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void uml_net_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	uml_net_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void uml_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
}

static const struct ethtool_ops uml_net_ethtool_ops = {
	.get_drvinfo	= uml_net_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};

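/*
 * Parse a "xx:xx:xx:xx:xx:xx" MAC address from the command line.  An
 * unparsable, multicast or otherwise invalid address falls back to a
 * randomly generated one.
 */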
void uml_net_setup_etheraddr(struct net_device *dev, char *str)
{
	unsigned char *addr = dev->dev_addr;
	char *end;
	int i;

	if (str == NULL)
		goto random;

	for (i = 0; i < 6; i++) {
		addr[i] = simple_strtoul(str, &end, 16);
		if ((end == str) ||
		    ((*end != ':') && (*end != ',') && (*end != '\0'))) {
			printk(KERN_ERR
			       "setup_etheraddr: failed to parse '%s' "
			       "as an ethernet address\n", str);
			goto random;
		}
		str = end + 1;
	}
	if (is_multicast_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign a multicast ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_valid_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign an invalid ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_local_ether_addr(addr)) {
		printk(KERN_WARNING
		       "Warning: Assigning a globally valid ethernet "
		       "address to a device\n");
		printk(KERN_WARNING "You should set the 2nd rightmost bit in "
		       "the first byte of the MAC,\n");
		printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
		       addr[5]);
	}
	return;

random:
	printk(KERN_INFO
	       "Choosing a random ethernet address for device %s\n", dev->name);
	eth_hw_addr_random(dev);
}

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};

static void net_device_release(struct device *dev)
{
	struct uml_net *device = dev_get_drvdata(dev);
	struct net_device *netdev = device->dev;
	struct uml_net_private *lp = netdev_priv(netdev);

	if (lp->remove != NULL)
		(*lp->remove)(&lp->user);
	list_del(&device->list);
	kfree(device);
	free_netdev(netdev);
}

static const struct net_device_ops uml_netdev_ops = {
	.ndo_open		= uml_net_open,
	.ndo_stop		= uml_net_close,
	.ndo_start_xmit		= uml_net_start_xmit,
	.ndo_set_rx_mode	= uml_net_set_multicast_list,
	.ndo_tx_timeout		= uml_net_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= uml_net_poll_controller,
#endif
};

/*
 * Ensures that platform_driver_register is called only once by
 * eth_configure.  Will be set in an initcall.
 */
static int driver_registered;

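/*
 * Create and register one UML network interface: allocate the uml_net
 * bookkeeping structure and the net_device, register the backing platform
 * device for sysfs, fill in the transport hooks and finally register the
 * netdevice itself.
 */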
static void eth_configure(int n, void *init, char *mac,
			  struct transport *transport, gfp_t gfp_mask)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;
	int err, size;

	size = transport->private_size + sizeof(struct uml_net_private);

	device = kzalloc(sizeof(*device), gfp_mask);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate struct "
		       "uml_net\n");
		return;
	}

	dev = alloc_etherdev(size);
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate struct "
		       "net_device for eth%d\n", n);
		goto out_free_device;
	}

	INIT_LIST_HEAD(&device->list);
	device->index = n;

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "eth%d", n);

	uml_net_setup_etheraddr(dev, mac);

	printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr);

	lp = netdev_priv(dev);
	/* This points to the transport private data.  It's still clear, but we
	 * must memset it to 0 *now*.  Let's help the drivers. */
	memset(lp, 0, size);
	INIT_WORK(&lp->work, uml_dev_close);

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	device->pdev.dev.release = net_device_release;
	dev_set_drvdata(&device->pdev.dev, device);
	if (platform_device_register(&device->pdev))
		goto out_free_netdev;
	SET_NETDEV_DEV(dev, &device->pdev.dev);

	device->dev = dev;

	/*
	 * These just fill in a data structure, so there's no failure
	 * to be worried about.
	 */
	(*transport->kern->init)(dev, init);

	*lp = ((struct uml_net_private)
		{ .list			= LIST_HEAD_INIT(lp->list),
		  .dev			= dev,
		  .fd			= -1,
		  .mac			= { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
		  .max_packet		= transport->user->max_packet,
		  .protocol		= transport->kern->protocol,
		  .open			= transport->user->open,
		  .close		= transport->user->close,
		  .remove		= transport->user->remove,
		  .read			= transport->kern->read,
		  .write		= transport->kern->write,
		  .add_address		= transport->user->add_address,
		  .delete_address	= transport->user->delete_address });

	spin_lock_init(&lp->lock);
	memcpy(lp->mac, dev->dev_addr, sizeof(lp->mac));

	if ((transport->user->init != NULL) &&
	    ((*transport->user->init)(&lp->user, dev) != 0))
		goto out_unregister;

	dev->mtu = transport->user->mtu;
	dev->netdev_ops = &uml_netdev_ops;
	dev->ethtool_ops = &uml_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	dev->irq = UM_ETH_IRQ;

	err = update_drop_skb(lp->max_packet);
	if (err)
		goto out_undo_user_init;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		goto out_undo_user_init;

	spin_lock(&devices_lock);
	list_add(&device->list, &devices);
	spin_unlock(&devices_lock);

	return;

out_undo_user_init:
	if (transport->user->remove != NULL)
		(*transport->user->remove)(&lp->user);
out_unregister:
	platform_device_unregister(&device->pdev);
	return; /* platform_device_unregister frees dev and device */
out_free_netdev:
	free_netdev(dev);
out_free_device:
	kfree(device);
}

static struct uml_net *find_device(int n)
{
	struct uml_net *device;
	struct list_head *ele;

	spin_lock(&devices_lock);
	list_for_each(ele, &devices) {
		device = list_entry(ele, struct uml_net, list);
		if (device->index == n)
			goto out;
	}
	device = NULL;
out:
	spin_unlock(&devices_lock);
	return device;
}

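/*
 * Parse the "<n>=<transport spec>" part of an eth= argument.  Returns 0
 * with the device number in *index_out and the remainder of the string in
 * *str_out, or -EINVAL with an error message in *error_out.
 */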
static int eth_parse(char *str, int *index_out, char **str_out,
		     char **error_out)
{
	char *end;
	int n, err = -EINVAL;

	n = simple_strtoul(str, &end, 0);
	if (end == str) {
		*error_out = "Bad device number";
		return err;
	}

	str = end;
	if (*str != '=') {
		*error_out = "Expected '=' after device number";
		return err;
	}

	str++;
	if (find_device(n)) {
		*error_out = "Device already configured";
		return err;
	}

	*index_out = n;
	*str_out = str;
	return 0;
}

struct eth_init {
	struct list_head list;
	char *init;
	int index;
};

static DEFINE_SPINLOCK(transports_lock);
static LIST_HEAD(transports);

/* Filled in during early boot */
static LIST_HEAD(eth_cmd_line);

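/*
 * Check whether an "eth" specification names the given transport.
 * Returns 0 if the transport name doesn't match; otherwise returns 1,
 * with *init_out pointing to the transport's setup data on success or
 * set to NULL if allocation or setup failed.
 */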
static int check_transport(struct transport *transport, char *eth, int n,
			   void **init_out, char **mac_out, gfp_t gfp_mask)
{
	int len;

	len = strlen(transport->name);
	if (strncmp(eth, transport->name, len))
		return 0;

	eth += len;
	if (*eth == ',')
		eth++;
	else if (*eth != '\0')
		return 0;

	*init_out = kmalloc(transport->setup_size, gfp_mask);
	if (*init_out == NULL)
		return 1;

	if (!transport->setup(eth, mac_out, *init_out)) {
		kfree(*init_out);
		*init_out = NULL;
	}
	return 1;
}

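/*
 * Called by each transport driver at registration time.  Adds the
 * transport to the global list and configures any command-line devices
 * that were waiting for it.
 */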
void register_transport(struct transport *new)
{
	struct list_head *ele, *next;
	struct eth_init *eth;
	void *init;
	char *mac = NULL;
	int match;

	spin_lock(&transports_lock);
	BUG_ON(!list_empty(&new->list));
	list_add(&new->list, &transports);
	spin_unlock(&transports_lock);

	list_for_each_safe(ele, next, &eth_cmd_line) {
		eth = list_entry(ele, struct eth_init, list);
		match = check_transport(new, eth->init, eth->index, &init,
					&mac, GFP_KERNEL);
		if (!match)
			continue;
		else if (init != NULL) {
			eth_configure(eth->index, init, mac, new, GFP_KERNEL);
			kfree(init);
		}
		list_del(&eth->list);
	}
}

static int eth_setup_common(char *str, int index)
{
	struct list_head *ele;
	struct transport *transport;
	void *init;
	char *mac = NULL;
	int found = 0;

	spin_lock(&transports_lock);
	list_for_each(ele, &transports) {
		transport = list_entry(ele, struct transport, list);
		if (!check_transport(transport, str, index, &init,
				     &mac, GFP_ATOMIC))
			continue;
		if (init != NULL) {
			eth_configure(index, init, mac, transport, GFP_ATOMIC);
			kfree(init);
		}
		found = 1;
		break;
	}

	spin_unlock(&transports_lock);
	return found;
}

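/*
 * Early "eth=" command-line handler.  The transports haven't registered
 * yet, so just record the specification on eth_cmd_line; it is consumed
 * later by register_transport().
 */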
static int __init eth_setup(char *str)
{
	struct eth_init *new;
	char *error;
	int n, err;

	err = eth_parse(str, &n, &str, &error);
	if (err) {
		printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n",
		       str, error);
		return 1;
	}

	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
	if (!new)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*new));

	INIT_LIST_HEAD(&new->list);
	new->index = n;
	new->init = str;

	list_add_tail(&new->list, &eth_cmd_line);
	return 1;
}

__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
"    Configure a network device.\n\n"
);

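/*
 * mconsole "config eth<n>=<transport>,<options>" handler: hot-add a
 * device at runtime.
 */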
static int net_config(char *str, char **error_out)
{
	int n, err;

	err = eth_parse(str, &n, &str, error_out);
	if (err)
		return err;

	/* This string is broken up and the pieces used by the underlying
	 * driver.  So, it is freed only if eth_setup_common fails.
	 */
	str = kstrdup(str, GFP_KERNEL);
	if (str == NULL) {
		*error_out = "net_config failed to strdup string";
		return -ENOMEM;
	}
	err = !eth_setup_common(str, n);
	if (err)
		kfree(str);
	return err;
}

static int net_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}

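/*
 * mconsole "remove eth<n>" handler: refuse while the interface is open,
 * otherwise unregister the netdevice and its platform device.
 */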
static int net_remove(int n, char **error_out)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;

	device = find_device(n);
	if (device == NULL)
		return -ENODEV;

	dev = device->dev;
	lp = netdev_priv(dev);
	if (lp->fd > 0)
		return -EBUSY;
	unregister_netdev(dev);
	platform_device_unregister(&device->pdev);

	return 0;
}

static struct mc_device net_mc = {
	.list		= LIST_HEAD_INIT(net_mc.list),
	.name		= "eth",
	.config		= net_config,
	.get_config	= NULL,
	.id		= net_id,
	.remove		= net_remove,
};

#ifdef CONFIG_INET
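/*
 * inetaddr notifier: forward IPv4 address add/delete events on UML
 * interfaces to the transport's add_address/delete_address hooks so the
 * host side can react.
 */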
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct uml_net_private *lp;
	void (*proc)(unsigned char *, unsigned char *, void *);
	unsigned char addr_buf[4], netmask_buf[4];

	if (dev->netdev_ops->ndo_open != uml_net_open)
		return NOTIFY_DONE;

	lp = netdev_priv(dev);

	proc = NULL;
	switch (event) {
	case NETDEV_UP:
		proc = lp->add_address;
		break;
	case NETDEV_DOWN:
		proc = lp->delete_address;
		break;
	}
	if (proc != NULL) {
		memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
		memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
		(*proc)(addr_buf, netmask_buf, &lp->user);
	}
	return NOTIFY_DONE;
}

/* uml_net_init shouldn't be called twice on two CPUs at the same time */
static struct notifier_block uml_inetaddr_notifier = {
	.notifier_call		= uml_inetaddr_event,
};

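/*
 * Register the inetaddr notifier and replay NETDEV_UP for addresses that
 * were configured on already-opened devices before the notifier existed.
 */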
static void inet_register(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;
	struct in_device *ip;
	struct in_ifaddr *in;

	register_inetaddr_notifier(&uml_inetaddr_notifier);

	/* Devices may have been opened already, so the uml_inetaddr_notifier
	 * didn't get a chance to run for them.  This fakes it so that
	 * addresses which have already been set up get handled properly.
	 */
	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		ip = lp->dev->ip_ptr;
		if (ip == NULL)
			continue;
		in = ip->ifa_list;
		while (in != NULL) {
			uml_inetaddr_event(NULL, NETDEV_UP, in);
			in = in->ifa_next;
		}
	}
	spin_unlock(&opened_lock);
}
#else
static inline void inet_register(void)
{
}
#endif

static int uml_net_init(void)
{
	mconsole_register_dev(&net_mc);
	inet_register();
	return 0;
}

__initcall(uml_net_init);

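/*
 * Exitcall: close every open host descriptor and run the transports'
 * remove hooks when the UML instance shuts down.
 */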
static void close_devices(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;

	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		um_free_irq(lp->dev->irq, lp->dev);
		if ((lp->close != NULL) && (lp->fd >= 0))
			(*lp->close)(lp->fd, &lp->user);
		if (lp->remove != NULL)
			(*lp->remove)(&lp->user);
	}
	spin_unlock(&opened_lock);
}

__uml_exitcall(close_devices);

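/*
 * Walk the device's IPv4 addresses, calling cb with each address/netmask
 * pair and the caller-supplied argument.
 */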
void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
					void *),
		    void *arg)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	unsigned char address[4], netmask[4];

	if (ip == NULL) return;
	in = ip->ifa_list;
	while (in != NULL) {
		memcpy(address, &in->ifa_address, sizeof(address));
		memcpy(netmask, &in->ifa_mask, sizeof(netmask));
		(*cb)(address, netmask, arg);
		in = in->ifa_next;
	}
}

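/*
 * Return the netmask of the device's first IPv4 address in *m; returns
 * nonzero if the device has no address configured.
 */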
int dev_netmask(void *d, void *m)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	__be32 *mask_out = m;

	if (ip == NULL)
		return 1;

	in = ip->ifa_list;
	if (in == NULL)
		return 1;

	*mask_out = in->ifa_mask;
	return 0;
}

void *get_output_buffer(int *len_out)
{
	void *ret;

	ret = (void *) __get_free_pages(GFP_KERNEL, 0);
	if (ret) *len_out = PAGE_SIZE;
	else *len_out = 0;
	return ret;
}

void free_output_buffer(void *buffer)
{
	free_pages((unsigned long) buffer, 0);
}

int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
		     char **gate_addr)
{
	char *remain;

	remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
	if (remain != NULL) {
		printk(KERN_ERR "tap_setup_common - Extra garbage on "
		       "specification : '%s'\n", remain);
		return 1;
	}

	return 0;
}

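/*
 * Default protocol hook for ethernet transports: let the generic helper
 * set skb->protocol and the packet type.
 */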
unsigned short eth_protocol(struct sk_buff *skb)
{
	return eth_type_trans(skb, skb->dev);
}