1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * X.25 Packet Layer release 002
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * This is ALPHA test software. This code may break your machine,
6*4882a593Smuzhiyun * randomly fail to work with new releases, misbehave and/or generally
7*4882a593Smuzhiyun * screw up. It might even work.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * This code REQUIRES 2.1.15 or higher
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * History
12*4882a593Smuzhiyun * X.25 001 Jonathan Naylor Started coding.
13*4882a593Smuzhiyun * X.25 002 Jonathan Naylor New timer architecture.
14*4882a593Smuzhiyun * mar/20/00 Daniela Squassoni Disabling/enabling of facilities
15*4882a593Smuzhiyun * negotiation.
16*4882a593Smuzhiyun * 2000-09-04 Henner Eisen dev_hold() / dev_put() for x25_neigh.
17*4882a593Smuzhiyun */
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #define pr_fmt(fmt) "X25: " fmt
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #include <linux/kernel.h>
22*4882a593Smuzhiyun #include <linux/jiffies.h>
23*4882a593Smuzhiyun #include <linux/timer.h>
24*4882a593Smuzhiyun #include <linux/slab.h>
25*4882a593Smuzhiyun #include <linux/netdevice.h>
26*4882a593Smuzhiyun #include <linux/skbuff.h>
27*4882a593Smuzhiyun #include <linux/uaccess.h>
28*4882a593Smuzhiyun #include <linux/init.h>
29*4882a593Smuzhiyun #include <net/x25.h>
30*4882a593Smuzhiyun
/* Global list of known X.25 neighbours (one per device) and its lock. */
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);
38*4882a593Smuzhiyun
/*
 * Linux set/reset timer routines
 */

/* (Re)arm the T20 restart timer; fires nb->t20 jiffies from now. */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}
46*4882a593Smuzhiyun
/*
 * T20 expired without a Restart Confirmation arriving: resend the
 * Restart Request and rearm the timer, retrying until the peer answers
 * (x25_link_control() stops the timer when a reply comes in).
 */
static void x25_t20timer_expiry(struct timer_list *t)
{
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}
55*4882a593Smuzhiyun
/* Cancel a pending T20 timer; harmless if it is not running. */
static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}
60*4882a593Smuzhiyun
/* Non-zero while our own Restart Request is outstanding (T20 armed). */
static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
	return timer_pending(&nb->t20timer);
}
65*4882a593Smuzhiyun
/*
 * This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;
	int confirm;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		/* Only answer with a confirmation when we were not already
		 * restarting ourselves: a pending T20 means our own Restart
		 * Request is in flight and the requests have crossed, so the
		 * peer's confirmation is already on its way.
		 */
		confirm = !x25_t20timer_pending(nb);
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		if (confirm)
			x25_transmit_restart_confirmation(nb);
		break;

	case X25_RESTART_CONFIRMATION:
		/* NOTE(review): accepted in any link state; an unsolicited
		 * confirmation moves the link straight to STATE_3 even when
		 * no restart was requested — confirm whether per-state
		 * filtering is wanted here.
		 */
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		break;

	case X25_DIAGNOSTIC:
		/* Need the three diagnostic bytes beyond the standard header
		 * in linear data before dereferencing them below.
		 */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
			skb->data[3], skb->data[4],
			skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
			frametype);
		break;
	}

	/* Link is up: flush any frames that were queued while restarting. */
	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun /*
110*4882a593Smuzhiyun * This routine is called when a Restart Request is needed
111*4882a593Smuzhiyun */
x25_transmit_restart_request(struct x25_neigh * nb)112*4882a593Smuzhiyun static void x25_transmit_restart_request(struct x25_neigh *nb)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun unsigned char *dptr;
115*4882a593Smuzhiyun int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
116*4882a593Smuzhiyun struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun if (!skb)
119*4882a593Smuzhiyun return;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun skb_reserve(skb, X25_MAX_L2_LEN);
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
126*4882a593Smuzhiyun *dptr++ = 0x00;
127*4882a593Smuzhiyun *dptr++ = X25_RESTART_REQUEST;
128*4882a593Smuzhiyun *dptr++ = 0x00;
129*4882a593Smuzhiyun *dptr++ = 0;
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun skb->sk = NULL;
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun x25_send_frame(skb, nb);
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun /*
137*4882a593Smuzhiyun * This routine is called when a Restart Confirmation is needed
138*4882a593Smuzhiyun */
x25_transmit_restart_confirmation(struct x25_neigh * nb)139*4882a593Smuzhiyun static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun unsigned char *dptr;
142*4882a593Smuzhiyun int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
143*4882a593Smuzhiyun struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun if (!skb)
146*4882a593Smuzhiyun return;
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun skb_reserve(skb, X25_MAX_L2_LEN);
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun dptr = skb_put(skb, X25_STD_MIN_LEN);
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun *dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
153*4882a593Smuzhiyun *dptr++ = 0x00;
154*4882a593Smuzhiyun *dptr++ = X25_RESTART_CONFIRMATION;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun skb->sk = NULL;
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun x25_send_frame(skb, nb);
159*4882a593Smuzhiyun }
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun /*
162*4882a593Smuzhiyun * This routine is called when a Clear Request is needed outside of the context
163*4882a593Smuzhiyun * of a connected socket.
164*4882a593Smuzhiyun */
x25_transmit_clear_request(struct x25_neigh * nb,unsigned int lci,unsigned char cause)165*4882a593Smuzhiyun void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
166*4882a593Smuzhiyun unsigned char cause)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun unsigned char *dptr;
169*4882a593Smuzhiyun int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
170*4882a593Smuzhiyun struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun if (!skb)
173*4882a593Smuzhiyun return;
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun skb_reserve(skb, X25_MAX_L2_LEN);
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun dptr = skb_put(skb, X25_STD_MIN_LEN + 2);
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun *dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
180*4882a593Smuzhiyun X25_GFI_EXTSEQ :
181*4882a593Smuzhiyun X25_GFI_STDSEQ);
182*4882a593Smuzhiyun *dptr++ = (lci >> 0) & 0xFF;
183*4882a593Smuzhiyun *dptr++ = X25_CLEAR_REQUEST;
184*4882a593Smuzhiyun *dptr++ = cause;
185*4882a593Smuzhiyun *dptr++ = 0x00;
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun skb->sk = NULL;
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun x25_send_frame(skb, nb);
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun
x25_transmit_link(struct sk_buff * skb,struct x25_neigh * nb)192*4882a593Smuzhiyun void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
193*4882a593Smuzhiyun {
194*4882a593Smuzhiyun switch (nb->state) {
195*4882a593Smuzhiyun case X25_LINK_STATE_0:
196*4882a593Smuzhiyun skb_queue_tail(&nb->queue, skb);
197*4882a593Smuzhiyun nb->state = X25_LINK_STATE_1;
198*4882a593Smuzhiyun x25_establish_link(nb);
199*4882a593Smuzhiyun break;
200*4882a593Smuzhiyun case X25_LINK_STATE_1:
201*4882a593Smuzhiyun case X25_LINK_STATE_2:
202*4882a593Smuzhiyun skb_queue_tail(&nb->queue, skb);
203*4882a593Smuzhiyun break;
204*4882a593Smuzhiyun case X25_LINK_STATE_3:
205*4882a593Smuzhiyun x25_send_frame(skb, nb);
206*4882a593Smuzhiyun break;
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun /*
211*4882a593Smuzhiyun * Called when the link layer has become established.
212*4882a593Smuzhiyun */
x25_link_established(struct x25_neigh * nb)213*4882a593Smuzhiyun void x25_link_established(struct x25_neigh *nb)
214*4882a593Smuzhiyun {
215*4882a593Smuzhiyun switch (nb->state) {
216*4882a593Smuzhiyun case X25_LINK_STATE_0:
217*4882a593Smuzhiyun nb->state = X25_LINK_STATE_2;
218*4882a593Smuzhiyun break;
219*4882a593Smuzhiyun case X25_LINK_STATE_1:
220*4882a593Smuzhiyun x25_transmit_restart_request(nb);
221*4882a593Smuzhiyun nb->state = X25_LINK_STATE_2;
222*4882a593Smuzhiyun x25_start_t20timer(nb);
223*4882a593Smuzhiyun break;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun
/*
 * Called when the link layer has terminated, or an establishment
 * request has failed.
 */

void x25_link_terminated(struct x25_neigh *nb)
{
	/* Drop back to idle before tearing down the calls on this link. */
	nb->state = X25_LINK_STATE_0;
	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}
238*4882a593Smuzhiyun
/*
 * Add a new device.  Allocates and initialises an x25_neigh for @dev
 * and publishes it on x25_neigh_list.  Allocation failure is silently
 * ignored (the device then simply has no X.25 neighbour).
 */
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

	/* The neighbour pins its device until it is removed. */
	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20 = sysctl_x25_restart_request_timeout;
	refcount_set(&nb->refcnt, 1);

	/* Publish only after the neighbour is fully initialised. */
	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}
270*4882a593Smuzhiyun
/**
 * __x25_remove_neigh - remove neighbour from x25_neigh_list
 * @nb: - neigh to remove
 *
 * Remove neighbour from x25_neigh_list. If it was there.
 * Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	/* Discard anything still queued and cancel a pending restart. */
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	/* A non-NULL next pointer means the node is still linked in. */
	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);	/* drop the list's reference */
	}
}
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun /*
290*4882a593Smuzhiyun * A device has been removed, remove its links.
291*4882a593Smuzhiyun */
x25_link_device_down(struct net_device * dev)292*4882a593Smuzhiyun void x25_link_device_down(struct net_device *dev)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun struct x25_neigh *nb;
295*4882a593Smuzhiyun struct list_head *entry, *tmp;
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun write_lock_bh(&x25_neigh_list_lock);
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun list_for_each_safe(entry, tmp, &x25_neigh_list) {
300*4882a593Smuzhiyun nb = list_entry(entry, struct x25_neigh, node);
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun if (nb->dev == dev) {
303*4882a593Smuzhiyun __x25_remove_neigh(nb);
304*4882a593Smuzhiyun dev_put(dev);
305*4882a593Smuzhiyun }
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun write_unlock_bh(&x25_neigh_list_lock);
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun
311*4882a593Smuzhiyun /*
312*4882a593Smuzhiyun * Given a device, return the neighbour address.
313*4882a593Smuzhiyun */
x25_get_neigh(struct net_device * dev)314*4882a593Smuzhiyun struct x25_neigh *x25_get_neigh(struct net_device *dev)
315*4882a593Smuzhiyun {
316*4882a593Smuzhiyun struct x25_neigh *nb, *use = NULL;
317*4882a593Smuzhiyun struct list_head *entry;
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun read_lock_bh(&x25_neigh_list_lock);
320*4882a593Smuzhiyun list_for_each(entry, &x25_neigh_list) {
321*4882a593Smuzhiyun nb = list_entry(entry, struct x25_neigh, node);
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun if (nb->dev == dev) {
324*4882a593Smuzhiyun use = nb;
325*4882a593Smuzhiyun break;
326*4882a593Smuzhiyun }
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun if (use)
330*4882a593Smuzhiyun x25_neigh_hold(use);
331*4882a593Smuzhiyun read_unlock_bh(&x25_neigh_list_lock);
332*4882a593Smuzhiyun return use;
333*4882a593Smuzhiyun }
334*4882a593Smuzhiyun
/*
 * Handle the ioctls that control the subscription functions:
 * SIOCX25GSUBSCRIP reads, SIOCX25SSUBSCRIP writes a device's extended
 * sequencing flag and global facility mask.  Returns 0 or a negative
 * errno (-EINVAL bad cmd/device/value, -EFAULT user copy failure).
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	/* The neighbour holds its own device reference; drop ours now. */
	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		/* Read both fields under the lock for a consistent pair. */
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		/* Only 0 (standard) or 1 (extended sequencing) is valid. */
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun /*
388*4882a593Smuzhiyun * Release all memory associated with X.25 neighbour structures.
389*4882a593Smuzhiyun */
x25_link_free(void)390*4882a593Smuzhiyun void __exit x25_link_free(void)
391*4882a593Smuzhiyun {
392*4882a593Smuzhiyun struct x25_neigh *nb;
393*4882a593Smuzhiyun struct list_head *entry, *tmp;
394*4882a593Smuzhiyun
395*4882a593Smuzhiyun write_lock_bh(&x25_neigh_list_lock);
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun list_for_each_safe(entry, tmp, &x25_neigh_list) {
398*4882a593Smuzhiyun struct net_device *dev;
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun nb = list_entry(entry, struct x25_neigh, node);
401*4882a593Smuzhiyun dev = nb->dev;
402*4882a593Smuzhiyun __x25_remove_neigh(nb);
403*4882a593Smuzhiyun dev_put(dev);
404*4882a593Smuzhiyun }
405*4882a593Smuzhiyun write_unlock_bh(&x25_neigh_list_lock);
406*4882a593Smuzhiyun }
407