// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ethernet netdevice using ATM AAL5 as underlying carrier
 * (RFC1483 obsoleted by RFC2684) for Linux
 *
 * Authors: Marcell GAL, 2000, XDSL Ltd, Hungary
 *          Eric Kinzie, 2006-2007, US Naval Research Laboratory
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/seq_file.h>

#include <linux/atmbr2684.h>

#include "common.h"
static void skb_debug(const struct sk_buff *skb)
{
#ifdef SKB_DEBUG
#define NUM2PRINT 50
	print_hex_dump(KERN_DEBUG, "br2684: skb: ", DUMP_PREFIX_OFFSET,
		       16, 1, skb->data, min(NUM2PRINT, skb->len), true);
#endif
}

#define BR2684_ETHERTYPE_LEN 2
#define BR2684_PAD_LEN 2

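/*
 * Building blocks of the RFC 2684 encapsulation headers (a short summary,
 * per RFC 2684): an LLC/SNAP header starts with LLC 0xAA-0xAA-0x03 followed
 * by an OUI.  OUI 0x00-80-C2 marks a bridged PDU and is followed by a 2-byte
 * PID (0x00-07 = Ethernet without preserved FCS) plus two pad octets; OUI
 * 0x00-00-00 marks a routed PDU and is followed directly by the EtherType
 * (0x0800 for IPv4, 0x86DD for IPv6).  VC-multiplexed bridged frames carry
 * only the two pad octets.
 */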
#define LLC 0xaa, 0xaa, 0x03
#define SNAP_BRIDGED 0x00, 0x80, 0xc2
#define SNAP_ROUTED 0x00, 0x00, 0x00
#define PID_ETHERNET 0x00, 0x07
#define ETHERTYPE_IPV4 0x08, 0x00
#define ETHERTYPE_IPV6 0x86, 0xdd
#define PAD_BRIDGED 0x00, 0x00

static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 };
static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 };
static const unsigned char llc_oui_pid_pad[] =
	{ LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED };
static const unsigned char pad[] = { PAD_BRIDGED };
static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 };
static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 };

enum br2684_encaps {
	e_vc = BR2684_ENCAPS_VC,
	e_llc = BR2684_ENCAPS_LLC,
};

struct br2684_vcc {
	struct atm_vcc *atmvcc;
	struct net_device *device;
	/* keep old push, pop functions for chaining */
	void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
	void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
	void (*old_release_cb)(struct atm_vcc *vcc);
	struct module *old_owner;
	enum br2684_encaps encaps;
	struct list_head brvccs;
#ifdef CONFIG_ATM_BR2684_IPFILTER
	struct br2684_filter filter;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
	unsigned int copies_needed, copies_failed;
	atomic_t qspace;
};

struct br2684_dev {
	struct net_device *net_dev;
	struct list_head br2684_devs;
	int number;
	struct list_head brvccs;	/* one device <=> one vcc (before xmas) */
	int mac_was_set;
	enum br2684_payload payload;
};

/*
 * This lock should be held for writing any time the list of devices or
 * their attached vcc's could be altered. It should be held for reading
 * any time these are being queried. Note that we sometimes need to
 * do read-locking under interrupt context, so write locking must block
 * the current CPU's interrupts
 */
static DEFINE_RWLOCK(devs_lock);

static LIST_HEAD(br2684_devs);

static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev)
{
	return netdev_priv(net_dev);
}

static inline struct net_device *list_entry_brdev(const struct list_head *le)
{
	return list_entry(le, struct br2684_dev, br2684_devs)->net_dev;
}

static inline struct br2684_vcc *BR2684_VCC(const struct atm_vcc *atmvcc)
{
	return (struct br2684_vcc *)(atmvcc->user_back);
}

static inline struct br2684_vcc *list_entry_brvcc(const struct list_head *le)
{
	return list_entry(le, struct br2684_vcc, brvccs);
}

/* Caller should hold read_lock(&devs_lock) */
static struct net_device *br2684_find_dev(const struct br2684_if_spec *s)
{
	struct list_head *lh;
	struct net_device *net_dev;
	switch (s->method) {
	case BR2684_FIND_BYNUM:
		list_for_each(lh, &br2684_devs) {
			net_dev = list_entry_brdev(lh);
			if (BRPRIV(net_dev)->number == s->spec.devnum)
				return net_dev;
		}
		break;
	case BR2684_FIND_BYIFNAME:
		list_for_each(lh, &br2684_devs) {
			net_dev = list_entry_brdev(lh);
			if (!strncmp(net_dev->name, s->spec.ifname, IFNAMSIZ))
				return net_dev;
		}
		break;
	}
	return NULL;
}

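/*
 * ATM device notifier: when the PHY signal state of an ATM device changes,
 * mirror it onto the carrier state of every br2684 netdevice whose attached
 * VCC lives on that ATM device.
 */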
static int atm_dev_event(struct notifier_block *this, unsigned long event,
			 void *arg)
{
	struct atm_dev *atm_dev = arg;
	struct list_head *lh;
	struct net_device *net_dev;
	struct br2684_vcc *brvcc;
	struct atm_vcc *atm_vcc;
	unsigned long flags;

	pr_debug("event=%ld dev=%p\n", event, atm_dev);

	read_lock_irqsave(&devs_lock, flags);
	list_for_each(lh, &br2684_devs) {
		net_dev = list_entry_brdev(lh);

		list_for_each_entry(brvcc, &BRPRIV(net_dev)->brvccs, brvccs) {
			atm_vcc = brvcc->atmvcc;
			if (atm_vcc && brvcc->atmvcc->dev == atm_dev) {

				if (atm_vcc->dev->signal == ATM_PHY_SIG_LOST)
					netif_carrier_off(net_dev);
				else
					netif_carrier_on(net_dev);

			}
		}
	}
	read_unlock_irqrestore(&devs_lock, flags);

	return NOTIFY_DONE;
}

static struct notifier_block atm_dev_notifier = {
	.notifier_call = atm_dev_event,
};

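/*
 * TX flow control: brvcc->qspace counts how many packets may still be
 * handed to the ATM layer (initialised to 2 in br2684_regvcc()).
 * br2684_xmit_vcc() decrements it per packet and stops the netdev queue
 * when it reaches zero; the chained pop below increments it once the ATM
 * driver has consumed a packet and wakes the queue when space reappears.
 */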
/* chained vcc->pop function. Check if we should wake the netif_queue */
static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct br2684_vcc *brvcc = BR2684_VCC(vcc);

	pr_debug("(vcc %p ; net_dev %p )\n", vcc, brvcc->device);
	brvcc->old_pop(vcc, skb);

	/* If the queue space just went up from zero, wake */
	if (atomic_inc_return(&brvcc->qspace) == 1)
		netif_wake_queue(brvcc->device);
}

/*
 * Send a packet out a particular vcc. Not too useful right now, but it
 * paves the way for multiple vcc's per interface. Returns true if we can
 * send, otherwise false.
 */
static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
			   struct br2684_vcc *brvcc)
{
	struct br2684_dev *brdev = BRPRIV(dev);
	struct atm_vcc *atmvcc;
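	/*
	 * Headroom needed in front of the payload for the chosen
	 * encapsulation: 10 bytes (LLC + SNAP + PID + pad) for LLC/bridged,
	 * 8 bytes (LLC + SNAP + EtherType) for LLC/routed, just the 2-byte
	 * pad for VC-mux/bridged, and nothing for VC-mux/routed.
	 */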
	int minheadroom = (brvcc->encaps == e_llc) ?
		((brdev->payload == p_bridged) ?
			sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) :
		((brdev->payload == p_bridged) ? BR2684_PAD_LEN : 0);

	if (skb_headroom(skb) < minheadroom) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom);
		brvcc->copies_needed++;
		dev_kfree_skb(skb);
		if (skb2 == NULL) {
			brvcc->copies_failed++;
			return 0;
		}
		skb = skb2;
	}

	if (brvcc->encaps == e_llc) {
		if (brdev->payload == p_bridged) {
			skb_push(skb, sizeof(llc_oui_pid_pad));
			skb_copy_to_linear_data(skb, llc_oui_pid_pad,
						sizeof(llc_oui_pid_pad));
		} else if (brdev->payload == p_routed) {
			unsigned short prot = ntohs(skb->protocol);

			skb_push(skb, sizeof(llc_oui_ipv4));
			switch (prot) {
			case ETH_P_IP:
				skb_copy_to_linear_data(skb, llc_oui_ipv4,
							sizeof(llc_oui_ipv4));
				break;
			case ETH_P_IPV6:
				skb_copy_to_linear_data(skb, llc_oui_ipv6,
							sizeof(llc_oui_ipv6));
				break;
			default:
				dev_kfree_skb(skb);
				return 0;
			}
		}
	} else { /* e_vc */
		if (brdev->payload == p_bridged) {
			skb_push(skb, 2);
			memset(skb->data, 0, 2);
		}
	}
	skb_debug(skb);

	ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
	atm_account_tx(atmvcc, skb);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	if (atomic_dec_return(&brvcc->qspace) < 1) {
		/* No more please! */
		netif_stop_queue(brvcc->device);
		/* We might have raced with br2684_pop() */
		if (unlikely(atomic_read(&brvcc->qspace) > 0))
			netif_wake_queue(brvcc->device);
	}

	/* If this fails immediately, the skb will be freed and br2684_pop()
	   will wake the queue if appropriate. Just return an error so that
	   the stats are updated correctly */
	return !atmvcc->send(atmvcc, skb);
}

static void br2684_release_cb(struct atm_vcc *atmvcc)
{
	struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);

	if (atomic_read(&brvcc->qspace) > 0)
		netif_wake_queue(brvcc->device);

	if (brvcc->old_release_cb)
		brvcc->old_release_cb(atmvcc);
}

static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
						   const struct br2684_dev *brdev)
{
	return list_empty(&brdev->brvccs) ? NULL :
	       list_entry_brvcc(brdev->brvccs.next);	/* 1 vcc/dev right now */
}

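/*
 * Transmit entry point: under read_lock(&devs_lock) pick the (single)
 * attached VCC, drop the packet if that VCC is released, closing or not
 * ready, and return NETDEV_TX_BUSY with the queue stopped if the ATM socket
 * is currently owned by user context (br2684_release_cb() restarts it).
 */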
static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct br2684_dev *brdev = BRPRIV(dev);
	struct br2684_vcc *brvcc;
	struct atm_vcc *atmvcc;
	netdev_tx_t ret = NETDEV_TX_OK;

	pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
	read_lock(&devs_lock);
	brvcc = pick_outgoing_vcc(skb, brdev);
	if (brvcc == NULL) {
		pr_debug("no vcc attached to dev %s\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
		/* netif_stop_queue(dev); */
		dev_kfree_skb(skb);
		goto out_devs;
	}
	atmvcc = brvcc->atmvcc;

	bh_lock_sock(sk_atm(atmvcc));

	if (test_bit(ATM_VF_RELEASED, &atmvcc->flags) ||
	    test_bit(ATM_VF_CLOSE, &atmvcc->flags) ||
	    !test_bit(ATM_VF_READY, &atmvcc->flags)) {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		goto out;
	}

	if (sock_owned_by_user(sk_atm(atmvcc))) {
		netif_stop_queue(brvcc->device);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (!br2684_xmit_vcc(skb, dev, brvcc)) {
		/*
		 * We should probably use netif_*_queue() here, but that
		 * involves added complication. We need to walk before
		 * we can run.
		 *
		 * Don't free here! this pointer might be no longer valid!
		 */
		dev->stats.tx_errors++;
		dev->stats.tx_fifo_errors++;
	}
out:
	bh_unlock_sock(sk_atm(atmvcc));
out_devs:
	read_unlock(&devs_lock);
	return ret;
}

/*
 * We remember when the MAC gets set, so we don't override it later with
 * the ESI of the ATM card of the first VC
 */
static int br2684_mac_addr(struct net_device *dev, void *p)
{
	int err = eth_mac_addr(dev, p);
	if (!err)
		BRPRIV(dev)->mac_was_set = 1;
	return err;
}

#ifdef CONFIG_ATM_BR2684_IPFILTER
/* this IOCTL is experimental. */
static int br2684_setfilt(struct atm_vcc *atmvcc, void __user *arg)
{
	struct br2684_vcc *brvcc;
	struct br2684_filter_set fs;

	if (copy_from_user(&fs, arg, sizeof fs))
		return -EFAULT;
	if (fs.ifspec.method != BR2684_FIND_BYNOTHING) {
		/*
		 * This is really a per-vcc thing, but we can also search
		 * by device.
		 */
		struct br2684_dev *brdev;
		read_lock(&devs_lock);
		brdev = BRPRIV(br2684_find_dev(&fs.ifspec));
		if (brdev == NULL || list_empty(&brdev->brvccs) ||
		    brdev->brvccs.next != brdev->brvccs.prev)	/* >1 VCC */
			brvcc = NULL;
		else
			brvcc = list_entry_brvcc(brdev->brvccs.next);
		read_unlock(&devs_lock);
		if (brvcc == NULL)
			return -ESRCH;
	} else
		brvcc = BR2684_VCC(atmvcc);
	memcpy(&brvcc->filter, &fs.filter, sizeof(brvcc->filter));
	return 0;
}

/* Returns 1 if packet should be dropped */
static inline int
packet_fails_filter(__be16 type, struct br2684_vcc *brvcc, struct sk_buff *skb)
{
	if (brvcc->filter.netmask == 0)
		return 0;	/* no filter in place */
	if (type == htons(ETH_P_IP) &&
	    (((struct iphdr *)(skb->data))->daddr &
	     brvcc->filter.netmask) == brvcc->filter.prefix)
		return 0;
	if (type == htons(ETH_P_ARP))
		return 0;
	/*
	 * TODO: we should probably filter ARPs too.. don't want to have
	 * them returning values that don't make sense, or is that ok?
	 */
	return 1;	/* drop */
}
#endif /* CONFIG_ATM_BR2684_IPFILTER */

static void br2684_close_vcc(struct br2684_vcc *brvcc)
{
	pr_debug("removing VCC %p from dev %p\n", brvcc, brvcc->device);
	write_lock_irq(&devs_lock);
	list_del(&brvcc->brvccs);
	write_unlock_irq(&devs_lock);
	brvcc->atmvcc->user_back = NULL;	/* what about vcc->recvq ??? */
	brvcc->atmvcc->release_cb = brvcc->old_release_cb;
	brvcc->old_push(brvcc->atmvcc, NULL);	/* pass on the bad news */
	module_put(brvcc->old_owner);
	kfree(brvcc);
}

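/*
 * Receive path: an skb of NULL means the VCC is being torn down. Otherwise
 * strip the RFC 2684 encapsulation: for LLC, match either the routed
 * IPv4/IPv6 SNAP header or the bridged Ethernet SNAP header; for VC-mux,
 * either sniff the IP version (routed) or drop the two pad octets and let
 * eth_type_trans() classify the frame (bridged).
 */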
/* when AAL5 PDU comes in: */
static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
	struct br2684_vcc *brvcc = BR2684_VCC(atmvcc);
	struct net_device *net_dev = brvcc->device;
	struct br2684_dev *brdev = BRPRIV(net_dev);

	pr_debug("\n");

	if (unlikely(skb == NULL)) {
		/* skb==NULL means VCC is being destroyed */
		br2684_close_vcc(brvcc);
		if (list_empty(&brdev->brvccs)) {
			write_lock_irq(&devs_lock);
			list_del(&brdev->br2684_devs);
			write_unlock_irq(&devs_lock);
			unregister_netdev(net_dev);
			free_netdev(net_dev);
		}
		return;
	}

	skb_debug(skb);
	atm_return(atmvcc, skb->truesize);
	pr_debug("skb from brdev %p\n", brdev);
	if (brvcc->encaps == e_llc) {

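		/*
		 * In the bridged LLC/SNAP header the PID sits at offsets
		 * 6-7; a low byte of 0x01 (PID 0x00-01) means the sender
		 * preserved the Ethernet FCS, so trim those 4 trailing bytes.
		 */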
		if (skb->len > 7 && skb->data[7] == 0x01)
			__skb_trim(skb, skb->len - 4);

		/* accept packets that have "ipv[46]" in the snap header */
		if ((skb->len >= (sizeof(llc_oui_ipv4))) &&
		    (memcmp(skb->data, llc_oui_ipv4,
			    sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) {
			if (memcmp(skb->data + 6, ethertype_ipv6,
				   sizeof(ethertype_ipv6)) == 0)
				skb->protocol = htons(ETH_P_IPV6);
			else if (memcmp(skb->data + 6, ethertype_ipv4,
					sizeof(ethertype_ipv4)) == 0)
				skb->protocol = htons(ETH_P_IP);
			else
				goto error;
			skb_pull(skb, sizeof(llc_oui_ipv4));
			skb_reset_network_header(skb);
			skb->pkt_type = PACKET_HOST;
		/*
		 * Let us waste some time for checking the encapsulation.
		 * Note, that only 7 char is checked so frames with a valid FCS
		 * are also accepted (but FCS is not checked of course).
		 */
		} else if ((skb->len >= sizeof(llc_oui_pid_pad)) &&
			   (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
			skb_pull(skb, sizeof(llc_oui_pid_pad));
			skb->protocol = eth_type_trans(skb, net_dev);
		} else
			goto error;

	} else { /* e_vc */
		if (brdev->payload == p_routed) {
			struct iphdr *iph;

			skb_reset_network_header(skb);
			iph = ip_hdr(skb);
			if (iph->version == 4)
				skb->protocol = htons(ETH_P_IP);
			else if (iph->version == 6)
				skb->protocol = htons(ETH_P_IPV6);
			else
				goto error;
			skb->pkt_type = PACKET_HOST;
		} else { /* p_bridged */
			/* first 2 chars should be 0 */
			if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0)
				goto error;
			skb_pull(skb, BR2684_PAD_LEN);
			skb->protocol = eth_type_trans(skb, net_dev);
		}
	}

#ifdef CONFIG_ATM_BR2684_IPFILTER
	if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb)))
		goto dropped;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
	skb->dev = net_dev;
	ATM_SKB(skb)->vcc = atmvcc;	/* needed ? */
	pr_debug("received packet's protocol: %x\n", ntohs(skb->protocol));
	skb_debug(skb);
	/* sigh, interface is down? */
	if (unlikely(!(net_dev->flags & IFF_UP)))
		goto dropped;
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += skb->len;
	memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
	netif_rx(skb);
	return;

dropped:
	net_dev->stats.rx_dropped++;
	goto free_skb;
error:
	net_dev->stats.rx_errors++;
free_skb:
	dev_kfree_skb(skb);
}

/*
 * Assign a vcc to a dev
 * Note: we do not have explicit unassign, but look at _push()
 */
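/*
 * br2684_regvcc() validates the user's atm_backend_br2684 parameters (FCS
 * handling, padding and min_size must all be off/zero), attaches the VCC to
 * the requested device (only one VCC per device for now), saves the old
 * push/pop/release_cb/owner so they can be chained and later restored, then
 * installs the br2684 handlers and replays anything received before the
 * backend was attached.
 */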
static int br2684_regvcc(struct atm_vcc *atmvcc, void __user *arg)
{
	struct br2684_vcc *brvcc;
	struct br2684_dev *brdev;
	struct net_device *net_dev;
	struct atm_backend_br2684 be;
	int err;

	if (copy_from_user(&be, arg, sizeof be))
		return -EFAULT;
	brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL);
	if (!brvcc)
		return -ENOMEM;
	/*
	 * Allow two packets in the ATM queue. One actually being sent, and one
	 * for the ATM 'TX done' handler to send. It shouldn't take long to get
	 * the next one from the netdev queue, when we need it. More than that
	 * would be bufferbloat.
	 */
	atomic_set(&brvcc->qspace, 2);
	write_lock_irq(&devs_lock);
	net_dev = br2684_find_dev(&be.ifspec);
	if (net_dev == NULL) {
		pr_err("tried to attach to non-existent device\n");
		err = -ENXIO;
		goto error;
	}
	brdev = BRPRIV(net_dev);
	if (atmvcc->push == NULL) {
		err = -EBADFD;
		goto error;
	}
	if (!list_empty(&brdev->brvccs)) {
		/* Only 1 VCC/dev right now */
		err = -EEXIST;
		goto error;
	}
	if (be.fcs_in != BR2684_FCSIN_NO ||
	    be.fcs_out != BR2684_FCSOUT_NO ||
	    be.fcs_auto || be.has_vpiid || be.send_padding ||
	    (be.encaps != BR2684_ENCAPS_VC &&
	     be.encaps != BR2684_ENCAPS_LLC) ||
	    be.min_size != 0) {
		err = -EINVAL;
		goto error;
	}
	pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc);
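	/*
	 * No MAC was configured explicitly and this is the first VCC, so
	 * seed the device address from the ATM card's ESI; if the ESI is
	 * all zeros, nudge one byte so the address isn't entirely zero.
	 */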
	if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
		unsigned char *esi = atmvcc->dev->esi;
		if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
			memcpy(net_dev->dev_addr, esi, net_dev->addr_len);
		else
			net_dev->dev_addr[2] = 1;
	}
	list_add(&brvcc->brvccs, &brdev->brvccs);
	write_unlock_irq(&devs_lock);
	brvcc->device = net_dev;
	brvcc->atmvcc = atmvcc;
	atmvcc->user_back = brvcc;
	brvcc->encaps = (enum br2684_encaps)be.encaps;
	brvcc->old_push = atmvcc->push;
	brvcc->old_pop = atmvcc->pop;
	brvcc->old_release_cb = atmvcc->release_cb;
	brvcc->old_owner = atmvcc->owner;
	barrier();
	atmvcc->push = br2684_push;
	atmvcc->pop = br2684_pop;
	atmvcc->release_cb = br2684_release_cb;
	atmvcc->owner = THIS_MODULE;

	/* initialize netdev carrier state */
	if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
		netif_carrier_off(net_dev);
	else
		netif_carrier_on(net_dev);

	__module_get(THIS_MODULE);

	/* re-process everything received between connection setup and
	   backend setup */
	vcc_process_recv_queue(atmvcc);
	return 0;

error:
	write_unlock_irq(&devs_lock);
	kfree(brvcc);
	return err;
}

static const struct net_device_ops br2684_netdev_ops = {
	.ndo_start_xmit = br2684_start_xmit,
	.ndo_set_mac_address = br2684_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops br2684_netdev_ops_routed = {
	.ndo_start_xmit = br2684_start_xmit,
	.ndo_set_mac_address = br2684_mac_addr,
};

static void br2684_setup(struct net_device *netdev)
{
	struct br2684_dev *brdev = BRPRIV(netdev);

	ether_setup(netdev);
	netdev->hard_header_len += sizeof(llc_oui_pid_pad); /* worst case */
	brdev->net_dev = netdev;

	netdev->netdev_ops = &br2684_netdev_ops;

	INIT_LIST_HEAD(&brdev->brvccs);
}

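/*
 * A routed (p_routed) interface carries bare IP rather than Ethernet, so
 * skip ether_setup(): no hardware address, ARPHRD_PPP type, point-to-point
 * and no-ARP flags, and only enough headroom for the routed LLC header.
 */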
static void br2684_setup_routed(struct net_device *netdev)
{
	struct br2684_dev *brdev = BRPRIV(netdev);

	brdev->net_dev = netdev;
	netdev->hard_header_len = sizeof(llc_oui_ipv4); /* worst case */
	netdev->netdev_ops = &br2684_netdev_ops_routed;
	netdev->addr_len = 0;
	netdev->mtu = ETH_DATA_LEN;
	netdev->min_mtu = 0;
	netdev->max_mtu = ETH_MAX_MTU;
	netdev->type = ARPHRD_PPP;
	netdev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	netdev->tx_queue_len = 100;
	INIT_LIST_HEAD(&brdev->brvccs);
}

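/*
 * Handle ATM_NEWBACKENDIF: copy the atm_newif_br2684 request, accept only
 * Ethernet media with an MTU of 1500, allocate a "nas%d" (or caller-named)
 * netdevice with the bridged or routed setup, register it and append it to
 * br2684_devs with the next device number.
 */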
static int br2684_create(void __user *arg)
{
	int err;
	struct net_device *netdev;
	struct br2684_dev *brdev;
	struct atm_newif_br2684 ni;
	enum br2684_payload payload;

	pr_debug("\n");

	if (copy_from_user(&ni, arg, sizeof ni))
		return -EFAULT;

	if (ni.media & BR2684_FLAG_ROUTED)
		payload = p_routed;
	else
		payload = p_bridged;
	ni.media &= 0xffff;	/* strip flags */

	if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500)
		return -EINVAL;

	netdev = alloc_netdev(sizeof(struct br2684_dev),
			      ni.ifname[0] ? ni.ifname : "nas%d",
			      NET_NAME_UNKNOWN,
			      (payload == p_routed) ? br2684_setup_routed : br2684_setup);
	if (!netdev)
		return -ENOMEM;

	brdev = BRPRIV(netdev);

	pr_debug("registered netdev %s\n", netdev->name);
	/* open, stop, do_ioctl ? */
	err = register_netdev(netdev);
	if (err < 0) {
		pr_err("register_netdev failed\n");
		free_netdev(netdev);
		return err;
	}

	write_lock_irq(&devs_lock);

	brdev->payload = payload;

	if (list_empty(&br2684_devs)) {
		/* 1st br2684 device */
		brdev->number = 1;
	} else
		brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;

	list_add_tail(&brdev->br2684_devs, &br2684_devs);
	write_unlock_irq(&devs_lock);
	return 0;
}

/*
 * This handles ioctls actually performed on our vcc - we must return
 * -ENOIOCTLCMD for any unrecognized ioctl
 */
static int br2684_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct atm_vcc *atmvcc = ATM_SD(sock);
	void __user *argp = (void __user *)arg;
	atm_backend_t b;

	int err;
	switch (cmd) {
	case ATM_SETBACKEND:
	case ATM_NEWBACKENDIF:
		err = get_user(b, (atm_backend_t __user *) argp);
		if (err)
			return -EFAULT;
		if (b != ATM_BACKEND_BR2684)
			return -ENOIOCTLCMD;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (cmd == ATM_SETBACKEND) {
			if (sock->state != SS_CONNECTED)
				return -EINVAL;
			return br2684_regvcc(atmvcc, argp);
		} else {
			return br2684_create(argp);
		}
#ifdef CONFIG_ATM_BR2684_IPFILTER
	case BR2684_SETFILT:
		if (atmvcc->push != br2684_push)
			return -ENOIOCTLCMD;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = br2684_setfilt(atmvcc, argp);

		return err;
#endif /* CONFIG_ATM_BR2684_IPFILTER */
	}
	return -ENOIOCTLCMD;
}

static struct atm_ioctl br2684_ioctl_ops = {
	.owner = THIS_MODULE,
	.ioctl = br2684_ioctl,
};

#ifdef CONFIG_PROC_FS
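/*
 * /proc/net/atm/br2684: one line per br2684 device (name, number, MAC and
 * whether it was set manually) followed by one line per attached VCC
 * (itf.vpi.vci, encapsulation, payload type and copy counters), plus the
 * optional IP filter.
 */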
static void *br2684_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(devs_lock)
{
	read_lock(&devs_lock);
	return seq_list_start(&br2684_devs, *pos);
}

static void *br2684_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &br2684_devs, pos);
}

static void br2684_seq_stop(struct seq_file *seq, void *v)
	__releases(devs_lock)
{
	read_unlock(&devs_lock);
}

static int br2684_seq_show(struct seq_file *seq, void *v)
{
	const struct br2684_dev *brdev = list_entry(v, struct br2684_dev,
						    br2684_devs);
	const struct net_device *net_dev = brdev->net_dev;
	const struct br2684_vcc *brvcc;

	seq_printf(seq, "dev %.16s: num=%d, mac=%pM (%s)\n",
		   net_dev->name,
		   brdev->number,
		   net_dev->dev_addr,
		   brdev->mac_was_set ? "set" : "auto");

	list_for_each_entry(brvcc, &brdev->brvccs, brvccs) {
		seq_printf(seq, " vcc %d.%d.%d: encaps=%s payload=%s"
			   ", failed copies %u/%u"
			   "\n", brvcc->atmvcc->dev->number,
			   brvcc->atmvcc->vpi, brvcc->atmvcc->vci,
			   (brvcc->encaps == e_llc) ? "LLC" : "VC",
			   (brdev->payload == p_bridged) ? "bridged" : "routed",
			   brvcc->copies_failed, brvcc->copies_needed);
#ifdef CONFIG_ATM_BR2684_IPFILTER
		if (brvcc->filter.netmask != 0)
			seq_printf(seq, " filter=%pI4/%pI4\n",
				   &brvcc->filter.prefix,
				   &brvcc->filter.netmask);
#endif /* CONFIG_ATM_BR2684_IPFILTER */
	}
	return 0;
}

static const struct seq_operations br2684_seq_ops = {
	.start = br2684_seq_start,
	.next = br2684_seq_next,
	.stop = br2684_seq_stop,
	.show = br2684_seq_show,
};

extern struct proc_dir_entry *atm_proc_root;	/* from proc.c */
#endif /* CONFIG_PROC_FS */

static int __init br2684_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *p;
	p = proc_create_seq("br2684", 0, atm_proc_root, &br2684_seq_ops);
	if (p == NULL)
		return -ENOMEM;
#endif
	register_atm_ioctl(&br2684_ioctl_ops);
	register_atmdevice_notifier(&atm_dev_notifier);
	return 0;
}

static void __exit br2684_exit(void)
{
	struct net_device *net_dev;
	struct br2684_dev *brdev;
	struct br2684_vcc *brvcc;
	deregister_atm_ioctl(&br2684_ioctl_ops);

#ifdef CONFIG_PROC_FS
	remove_proc_entry("br2684", atm_proc_root);
#endif

	unregister_atmdevice_notifier(&atm_dev_notifier);

	while (!list_empty(&br2684_devs)) {
		net_dev = list_entry_brdev(br2684_devs.next);
		brdev = BRPRIV(net_dev);
		while (!list_empty(&brdev->brvccs)) {
			brvcc = list_entry_brvcc(brdev->brvccs.next);
			br2684_close_vcc(brvcc);
		}

		list_del(&brdev->br2684_devs);
		unregister_netdev(net_dev);
		free_netdev(net_dev);
	}
}

module_init(br2684_init);
module_exit(br2684_exit);

MODULE_AUTHOR("Marcell GAL");
MODULE_DESCRIPTION("RFC2684 bridged protocols over ATM/AAL5");
MODULE_LICENSE("GPL");