// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *

	Theory of PVC state

 DCE mode:

 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
	 0,x -> 1,1 if "link reliable" when sending FULL STATUS
	 1,1 -> 1,0 if received FULL STATUS ACK

 (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
	  -> 1 when "PVC up" and (exist,new) = 1,0

 DTE mode:
 (exist,new,active) = FULL STATUS if "link reliable"
		     = 0, 0, 0 if "link unreliable"
 No LMI:
 active = open and "link reliable"
 exist = new = not used

 CCITT LMI: ITU-T Q.933 Annex A
 ANSI LMI: ANSI T1.617 Annex D
 CISCO LMI: the original, aka "Gang of Four" LMI

*/

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI			0x03
#define FR_PAD			0x00

#define NLPID_IP		0xCC
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09


#define LMI_CCITT_ANSI_DLCI	0	/* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00	/* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95	/* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01	/* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03	/* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07	/* PVC status */
#define LMI_CCITT_PVCSTAT	0x57

#define LMI_FULLREP		0x00	/* full report */
#define LMI_INTEGRITY		0x01	/* link integrity report */
#define LMI_SINGLE		0x02	/* single PVC report */

#define LMI_STATUS_ENQUIRY	0x75
#define LMI_STATUS		0x7D	/* reply */

#define LMI_REPT_LEN		1	/* report type element length */
#define LMI_INTEG_LEN		2	/* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	13	/* LMI frame lengths */
#define LMI_ANSI_LENGTH		14


struct fr_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned ea1: 1;
	unsigned cr: 1;
	unsigned dlcih: 6;

	unsigned ea2: 1;
	unsigned de: 1;
	unsigned becn: 1;
	unsigned fecn: 1;
	unsigned dlcil: 4;
#else
	unsigned dlcih: 6;
	unsigned cr: 1;
	unsigned ea1: 1;

	unsigned dlcil: 4;
	unsigned fecn: 1;
	unsigned becn: 1;
	unsigned de: 1;
	unsigned ea2: 1;
#endif
} __packed;


struct pvc_device {
	struct net_device *frad;
	struct net_device *main;
	struct net_device *ether;	/* bridged Ethernet interface */
	struct pvc_device *next;	/* Sorted in ascending DLCI order */
	int dlci;
	int open_count;

	struct {
		unsigned int new: 1;
		unsigned int active: 1;
		unsigned int exist: 1;
		unsigned int deleted: 1;
		unsigned int fecn: 1;
		unsigned int becn: 1;
		unsigned int bandwidth;	/* Cisco LMI reporting only */
	} state;
};

struct frad_state {
	fr_proto settings;
	struct pvc_device *first_pvc;
	int dce_pvc_count;

	struct timer_list timer;
	struct net_device *dev;
	unsigned long last_poll;
	int reliable;
	int dce_changed;
	int request;
	int fullrep_sent;
	u32 last_errors;	/* last errors bit list */
	u8 n391cnt;
	u8 txseq;		/* TX sequence number */
	u8 rxseq;		/* RX sequence number */
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);

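/* The helpers below pack and unpack the 2-byte Q.922 address used by this
 * driver: the upper 6 DLCI bits sit in bits 7-2 of the first byte (C/R and
 * EA0 below them), the lower 4 DLCI bits in bits 7-4 of the second byte,
 * followed by FECN, BECN, DE and EA1 = 1.  For example, DLCI 16 encodes as
 * the bytes 0x04 0x01 and decodes back to 16.
 */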
static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}


static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}


static inline struct frad_state *state(hdlc_device *hdlc)
{
	return (struct frad_state *)(hdlc->state);
}


static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->dlci == dlci)
			return pvc;
		if (pvc->dlci > dlci)
			return NULL;	/* the list is sorted */
		pvc = pvc->next;
	}

	return NULL;
}


static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if ((*pvc_p)->dlci == dlci)
			return *pvc_p;
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;
	}

	pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
#ifdef DEBUG_PVC
	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
	if (!pvc)
		return NULL;

	pvc->dlci = dlci;
	pvc->frad = dev;
	pvc->next = *pvc_p;	/* Put it in the chain */
	*pvc_p = pvc;
	return pvc;
}


static inline int pvc_is_used(struct pvc_device *pvc)
{
	return pvc->main || pvc->ether;
}


static inline void pvc_carrier(int on, struct pvc_device *pvc)
{
	if (on) {
		if (pvc->main)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
		if (pvc->ether)
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
	} else {
		if (pvc->main)
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
		if (pvc->ether)
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);
	}
}


static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
	struct pvc_device **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if (!pvc_is_used(*pvc_p)) {
			struct pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
			*pvc_p = pvc->next;
			kfree(pvc);
			continue;
		}
		pvc_p = &(*pvc_p)->next;
	}
}


static inline struct net_device **get_dev_p(struct pvc_device *pvc,
					    int type)
{
	if (type == ARPHRD_ETHER)
		return &pvc->ether;
	else
		return &pvc->main;
}

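/* fr_hard_header() prepends the Q.922 address and the UI control byte, then
 * picks the payload encapsulation: a 1-byte NLPID for IP, IPv6 and the LMI
 * DLCIs (4 bytes of header in total), or a SNAP header (pad, NLPID_SNAP,
 * 3-byte OUI, 2-byte PID - 10 bytes in total) for other Ethertypes and for
 * bridged Ethernet.
 */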
static int fr_hard_header(struct sk_buff *skb, u16 dlci)
{
	if (!skb->dev) { /* Control packets */
		switch (dlci) {
		case LMI_CCITT_ANSI_DLCI:
			skb_push(skb, 4);
			skb->data[3] = NLPID_CCITT_ANSI_LMI;
			break;

		case LMI_CISCO_DLCI:
			skb_push(skb, 4);
			skb->data[3] = NLPID_CISCO_LMI;
			break;

		default:
			return -EINVAL;
		}

	} else if (skb->dev->type == ARPHRD_DLCI) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			skb_push(skb, 4);
			skb->data[3] = NLPID_IP;
			break;

		case htons(ETH_P_IPV6):
			skb_push(skb, 4);
			skb->data[3] = NLPID_IPV6;
			break;

		default:
			skb_push(skb, 10);
			skb->data[3] = FR_PAD;
			skb->data[4] = NLPID_SNAP;
			/* OUI 00-00-00 indicates an Ethertype follows */
			skb->data[5] = 0x00;
			skb->data[6] = 0x00;
			skb->data[7] = 0x00;
			/* This should be an Ethertype: */
			*(__be16 *)(skb->data + 8) = skb->protocol;
		}

	} else if (skb->dev->type == ARPHRD_ETHER) {
		skb_push(skb, 10);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		/* OUI 00-80-C2 stands for the 802.1 organization */
		skb->data[5] = 0x00;
		skb->data[6] = 0x80;
		skb->data[7] = 0xC2;
		/* PID 00-07 stands for Ethernet frames without FCS */
		skb->data[8] = 0x00;
		skb->data[9] = 0x07;

	} else {
		return -EINVAL;
	}

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
	return 0;
}



static int pvc_open(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if ((pvc->frad->flags & IFF_UP) == 0)
		return -EIO;	/* Frad must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = netif_carrier_ok(pvc->frad);

		pvc_carrier(pvc->state.active, pvc);
		state(hdlc)->dce_changed = 1;
	}
	return 0;
}



static int pvc_close(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (state(hdlc)->settings.dce) {
			state(hdlc)->dce_changed = 1;
			pvc->state.active = 0;
		}
	}
	return 0;
}



static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct pvc_device *pvc = dev->ml_priv;
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);
			return -ENOBUFS;
		}

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}

static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (!pvc->state.active)
		goto drop;

	if (dev->type == ARPHRD_ETHER) {
		int pad = ETH_ZLEN - skb->len;

		if (pad > 0) { /* Pad the frame with zeros */
			if (__skb_pad(skb, pad, false))
				goto drop;
			skb_put(skb, pad);
		}
	}

	/* We already requested the header space with dev->needed_headroom.
	 * So this is just a protection in case the upper layer didn't take
	 * dev->needed_headroom into consideration.
	 */
	if (skb_headroom(skb) < 10) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);

		if (!skb2)
			goto drop;
		dev_kfree_skb(skb);
		skb = skb2;
	}

	skb->dev = dev;
	if (fr_hard_header(skb, pvc->dlci))
		goto drop;

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	if (pvc->state.fecn) /* TX Congestion counter */
		dev->stats.tx_compressed++;
	skb->dev = pvc->frad;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static inline void fr_log_dlci_active(struct pvc_device *pvc)
{
	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
		    pvc->dlci,
		    pvc->main ? pvc->main->name : "",
		    pvc->main && pvc->ether ? " " : "",
		    pvc->ether ? pvc->ether->name : "",
		    pvc->state.new ? " new" : "",
		    !pvc->state.exist ? "deleted" :
		    pvc->state.active ? "active" : "inactive");
}



static inline u8 fr_lmi_nextseq(u8 x)
{
	x++;
	return x ? x : 1;
}

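/* Build and queue an LMI frame on the FRAD: Q.922 address + UI + NLPID,
 * call reference, message type (STATUS from a DCE, STATUS ENQUIRY from a
 * DTE), the ANSI locking shift where applicable, a report type IE, a link
 * integrity IE carrying the TX/RX sequence numbers, and - in a DCE full
 * report - one PVC status IE per known DLCI.
 */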
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct sk_buff *skb;
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
	u8 *data;
	int i = 0;

	if (dce && fullrep) {
		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	if (lmi == LMI_CISCO) {
		fr_hard_header(skb, LMI_CISCO_DLCI);
	} else {
		fr_hard_header(skb, LMI_CCITT_ANSI_DLCI);
	}
	data = skb_tail_pointer(skb);
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (lmi == LMI_ANSI)
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = state(hdlc)->txseq =
		fr_lmi_nextseq(state(hdlc)->txseq);
	data[i++] = state(hdlc)->rxseq;

	if (dce && fullrep) {
		while (pvc) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (state(hdlc)->reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);
			}

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc_carrier(1, pvc);
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);
			}

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
			} else {
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
				data[i + 2] = 0x80;
			}

			if (pvc->state.new)
				data[i + 2] |= 0x08;
			else if (pvc->state.active)
				data[i + 2] |= 0x02;

			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}



static void fr_set_link_state(int reliable, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->reliable = reliable;
	if (reliable) {
		netif_dormant_off(dev);
		state(hdlc)->n391cnt = 0; /* Request full status */
		state(hdlc)->dce_changed = 1;

		if (state(hdlc)->settings.lmi == LMI_NONE) {
			while (pvc) {	/* Activate all PVCs */
				pvc_carrier(1, pvc);
				pvc->state.exist = pvc->state.active = 1;
				pvc->state.new = 0;
				pvc = pvc->next;
			}
		}
	} else {
		netif_dormant_on(dev);
		while (pvc) {		/* Deactivate all PVCs */
			pvc_carrier(0, pvc);
			pvc->state.exist = pvc->state.active = 0;
			pvc->state.new = 0;
			if (!state(hdlc)->settings.dce)
				pvc->state.bandwidth = 0;
			pvc = pvc->next;
		}
	}
}

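/* Periodic LMI housekeeping.  As a DTE the timer fires every T391 seconds,
 * sends a STATUS ENQUIRY (a full-status request every N391 polls) and
 * records missing replies; the link stays reliable while fewer than N392 of
 * the last N393 events were errors.  As a DCE it fires every T392 seconds
 * and only checks that an enquiry arrived since the previous expiry.
 */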
static void fr_timer(struct timer_list *t)
{
	struct frad_state *st = from_timer(st, t, timer);
	struct net_device *dev = st->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;
	u32 list;

	if (state(hdlc)->settings.dce) {
		reliable = state(hdlc)->request &&
			time_before(jiffies, state(hdlc)->last_poll +
				    state(hdlc)->settings.t392 * HZ);
		state(hdlc)->request = 0;
	} else {
		state(hdlc)->last_errors <<= 1; /* Shift the list */
		if (state(hdlc)->request) {
			if (state(hdlc)->reliable)
				netdev_info(dev, "No LMI status reply received\n");
			state(hdlc)->last_errors |= 1;
		}

		list = state(hdlc)->last_errors;
		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < state(hdlc)->settings.n392);
	}

	if (state(hdlc)->reliable != reliable) {
		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
		fr_set_link_state(reliable, dev);
	}

	if (state(hdlc)->settings.dce)
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t392 * HZ;
	else {
		if (state(hdlc)->n391cnt)
			state(hdlc)->n391cnt--;

		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

		state(hdlc)->last_poll = jiffies;
		state(hdlc)->request = 1;
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t391 * HZ;
	}

	add_timer(&state(hdlc)->timer);
}


static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc;
	u8 rxseq, txseq;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		netdev_info(dev, "Short LMI frame\n");
		return 1;
	}

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
		return 1;
	}

	if (skb->data[4] != LMI_CALLREF) {
		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
			    skb->data[4]);
		return 1;
	}

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
			    skb->data[5]);
		return 1;
	}

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
				    skb->data[6]);
			return 1;
		}
		i = 7;
	} else
		i = 6;

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_REPT_LEN) {
		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
			    reptype);
		return 1;
	}

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_INTEG_LEN) {
		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}
	i++;

	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
	rxseq = skb->data[i++];	/* Should confirm our sequence */

	txseq = state(hdlc)->txseq;

	if (dce)
		state(hdlc)->last_poll = jiffies;

	error = 0;
	if (!state(hdlc)->reliable)
		error = 1;

	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
		state(hdlc)->n391cnt = 0;
		error = 1;
	}

	if (dce) {
		if (state(hdlc)->fullrep_sent && !error) {
/* Stop sending full report - the last one has been confirmed by DTE */
			state(hdlc)->fullrep_sent = 0;
			pvc = state(hdlc)->first_pvc;
			while (pvc) {
				if (pvc->state.new) {
					pvc->state.new = 0;

/* Tell DTE that new PVC is now active */
					state(hdlc)->dce_changed = 1;
				}
				pvc = pvc->next;
			}
		}

		if (state(hdlc)->dce_changed) {
			reptype = LMI_FULLREP;
			state(hdlc)->fullrep_sent = 1;
			state(hdlc)->dce_changed = 0;
		}

		state(hdlc)->request = 1; /* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
		return 0;
	}

	/* DTE */

	state(hdlc)->request = 0; /* got response, no request pending */

	if (error)
		return 0;

	if (reptype != LMI_FULLREP)
		return 0;

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		pvc->state.deleted = 1;
		pvc = pvc->next;
	}

	no_ram = 0;
	while (skb->len >= i + 2 + stat_len) {
		u16 dlci;
		u32 bw;
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				     LMI_ANSI_CISCO_PVCSTAT)) {
			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
				    skb->data[i]);
			return 1;
		}

		if (skb->data[++i] != stat_len) {
			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
				    skb->data[i]);
			return 1;
		}
		i++;

		new = !!(skb->data[i + 2] & 0x08);
		active = !!(skb->data[i + 2] & 0x02);
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
				(skb->data[i + 5]);
		} else {
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);
			bw = 0;
		}

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
			no_ram = 1;
		}

		if (pvc) {
			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
			    !pvc->state.exist) {
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);
			}
		}

		i += stat_len;
	}

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->state.deleted && pvc->state.exist) {
			pvc_carrier(0, pvc);
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);
		}
		pvc = pvc->next;
	}

	/* Next full report after N391 polls */
	state(hdlc)->n391cnt = state(hdlc)->settings.n391;

	return 0;
}

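/* Receive path for the FRAD: frames on an LMI DLCI are handed to
 * fr_lmi_recv(), everything else is matched to a PVC by DLCI, FECN/BECN
 * transitions are noted, and the payload is passed up either as IP/IPv6
 * (plain NLPID), as a SNAP-encapsulated Ethertype, or as a bridged Ethernet
 * frame on the pvceth device.
 */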
static int fr_rx(struct sk_buff *skb)
{
	struct net_device *frad = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct fr_hdr *fh = (struct fr_hdr *)skb->data;
	u8 *data = skb->data;
	u16 dlci;
	struct pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
			    dlci);
#endif
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}


	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		frad->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(__be16 *)(data + 6));
		u16 pid = ntohs(*(__be16 *)(data + 8));
		skb_pull(skb, 10);

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP: /* routed frame with SNAP */
		case ETH_P_IPX:
		case ETH_P_IP:	/* a long variant */
		case ETH_P_IPV6:
			dev = pvc->main;
			skb->protocol = htons(pid);
			break;

		case 0x80C20007: /* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
			break;

		default:
			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
				    oui, pid);
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
	} else {
		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
			    data[3], skb->len);
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (dev) {
		dev->stats.rx_packets++; /* PVC traffic */
		dev->stats.rx_bytes += skb->len;
		if (pvc->state.becn)
			dev->stats.rx_compressed++;
		skb->dev = dev;
		netif_rx(skb);
		return NET_RX_SUCCESS;
	} else {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

rx_error:
	frad->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}



static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		state(hdlc)->dev = dev;
		timer_setup(&state(hdlc)->timer, fr_timer, 0);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		add_timer(&state(hdlc)->timer);
	} else
		fr_set_link_state(1, dev);
}


static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);
	fr_set_link_state(0, dev);
}


static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}


static void pvc_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 0;
	dev->addr_len = 2;
	netif_keep_dst(dev);
}

static const struct net_device_ops pvc_ops = {
	.ndo_open	= pvc_open,
	.ndo_stop	= pvc_close,
	.ndo_start_xmit	= pvc_xmit,
	.ndo_do_ioctl	= pvc_ioctl,
};

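/* Create the user-visible netdevice for a DLCI: "pvc%d" (ARPHRD_DLCI) for
 * routed traffic or "pvceth%d" (ARPHRD_ETHER) for bridged Ethernet, hook it
 * into the FRAD's PVC list and mark the DCE state as changed so the next
 * full status report advertises it.
 */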
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc;
	struct net_device *dev;
	int used;

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
				   ether_setup);
	else
		dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);

	if (!dev) {
		netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER) {
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		eth_hw_addr_random(dev);
	} else {
		*(__be16 *)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->netdev_ops = &pvc_ops;
	dev->mtu = HDLC_MAX_MTU;
	dev->min_mtu = 68;
	dev->max_mtu = HDLC_MAX_MTU;
	dev->needed_headroom = 10;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->ml_priv = pvc;

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->needs_free_netdev = true;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
	}
	return 0;
}



static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	struct pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}



static void fr_destroy(struct net_device *frad)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

	while (pvc) {
		struct pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}


static struct hdlc_proto proto = {
	.close		= fr_close,
	.start		= fr_start,
	.stop		= fr_stop,
	.detach		= fr_destroy,
	.ioctl		= fr_ioctl,
	.netif_rx	= fr_rx,
	.module		= THIS_MODULE,
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
			result = attach_hdlc_protocol(dev, &proto,
						      sizeof(struct frad_state));
			if (result)
				return result;
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;
		}
		memcpy(&state(hdlc)->settings, &new_settings, size);
		dev->type = ARPHRD_FRAD;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER; /* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}


static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}


static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");