// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(ax25_frag_lock);

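/*
 * Find or create a connection for the given source/destination addresses
 * and digipeater path on the given device, and queue the frame on it for
 * transmission.  Returns the control block with an extra reference held
 * (the caller must drop it with ax25_cb_put()), or NULL on failure.
 */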
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *ax25;

	/*
	 * Take the default packet length for the device if zero is
	 * specified.
	 */
	if (paclen == 0) {
		if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
			return NULL;

		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
	}

	/*
	 * Look for an existing connection.
	 */
	if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
		ax25_output(ax25, paclen, skb);
		return ax25;		/* It already existed */
	}

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		return NULL;

	if ((ax25 = ax25_create_cb()) == NULL)
		return NULL;

	ax25_fillin_cb(ax25, ax25_dev);

	ax25->source_addr = *src;
	ax25->dest_addr   = *dest;

	if (digi != NULL) {
		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			ax25_cb_put(ax25);
			return NULL;
		}
	}

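	/*
	 * Bring the data link up using the link-establishment procedure
	 * configured for the device: the standard AX.25 procedure, or the
	 * DAMA slave variant when a DAMA master is polling us.
	 */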
	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_establish_data_link(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25_dev->dama.slave)
			ax25_ds_establish_data_link(ax25);
		else
			ax25_std_establish_data_link(ax25);
		break;
#endif
	}

	/*
	 * There is one ref for the state machine; a caller needs
	 * one more to put it back, just like with the existing one.
	 */
	ax25_cb_hold(ax25);

	ax25_cb_add(ax25);

	ax25->state = AX25_STATE_1;

	ax25_start_heartbeat(ax25);

	ax25_output(ax25, paclen, skb);

	return ax25;			/* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);

/*
 * All outgoing AX.25 I frames pass via this routine. Therefore this is
 * where the fragmentation of frames takes place. Frames whose payload
 * exceeds paclen are split into fragments of at most paclen bytes; a
 * frame that already fits is queued unchanged.
 */
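/*
 * Payloads other than plain text (AX25_P_TEXT) are fragmented in the KA9Q
 * segmentation format: each fragment carries a two byte header in front
 * of its data,
 *
 *	+-----------------------+-----+----------------------------+
 *	| PID = AX25_P_SEGMENT  |  F  | 7-bit "fragments to follow" |
 *	+-----------------------+-----+----------------------------+
 *
 * where F (AX25_SEG_FIRST) is set only on the first fragment and the
 * counter is decremented on each subsequent fragment, reaching zero on
 * the last one.  Plain-text payloads are simply split, with each piece
 * resent under the AX25_P_TEXT PID.
 */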
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	if (paclen < 16) {
		WARN_ON_ONCE(1);
		kfree_skb(skb);
		return;
	}

	if ((skb->len - 1) > paclen) {
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1); /* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		fragno = skb->len / paclen;
		if (skb->len % paclen == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						       skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb); /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	/*
	 * A DAMA slave is _required_ to work as normal AX.25L2V2
	 * if no DAMA master is available.
	 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
		break;
#endif
	}
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
	unsigned char *frame;

	if (skb == NULL)
		return;

	skb_reset_network_header(skb);

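	/*
	 * Modulo-8 operation uses a single control byte carrying N(S), N(R)
	 * and the P/F bit; extended (modulo-128) operation uses a two byte
	 * control field, with N(S) in the first byte and N(R) plus the
	 * extended P/F bit in the second.
	 */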
	if (ax25->modulus == AX25_MODULUS) {
		frame = skb_push(skb, 1);

		*frame = AX25_I;
		*frame |= (poll_bit) ? AX25_PF : 0;
		*frame |= (ax25->vr << 5);
		*frame |= (ax25->vs << 1);
	} else {
		frame = skb_push(skb, 2);

		frame[0] = AX25_I;
		frame[0] |= (ax25->vs << 1);
		frame[1] = (poll_bit) ? AX25_EPF : 0;
		frame[1] |= (ax25->vr << 1);
	}

	ax25_start_idletimer(ax25);

	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

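/*
 * Push queued I frames out of the write queue, within the limits of the
 * transmit window.  Each frame is cloned for transmission and the original
 * is moved to the ack queue until the peer acknowledges it.
 */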
void ax25_kick(ax25_cb *ax25)
{
	struct sk_buff *skb, *skbn;
	int last = 1;
	unsigned short start, end, next;

	if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
		return;

	if (ax25->condition & AX25_COND_PEER_RX_BUSY)
		return;

	if (skb_peek(&ax25->write_queue) == NULL)
		return;

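	/*
	 * start is the sequence number of the next frame to transmit: V(A)
	 * if nothing is awaiting acknowledgement, otherwise V(S).  end is
	 * one past the last sequence number the window allows us to send.
	 */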
	start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
	end   = (ax25->va + ax25->window) % ax25->modulus;

	if (start == end)
		return;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full. Send a poll on the final I frame if
	 * the window is filled.
	 */

	/*
	 * Dequeue the frame and copy it.
	 * Check for race with ax25_clear_queues().
	 */
	skb = skb_dequeue(&ax25->write_queue);
	if (!skb)
		return;

	ax25->vs = start;

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&ax25->write_queue, skb);
			break;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		next = (ax25->vs + 1) % ax25->modulus;
		last = (next == end);

		/*
		 * Transmit the frame copy.
		 * bke 960114: do not set the Poll bit on the last frame
		 * in DAMA mode.
		 */
		switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
		case AX25_PROTO_STD_SIMPLEX:
		case AX25_PROTO_STD_DUPLEX:
			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
			break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		case AX25_PROTO_DAMA_SLAVE:
			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
			break;
#endif
		}

		ax25->vs = next;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&ax25->ack_queue, skb);

	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

	ax25->condition &= ~AX25_COND_ACK_PENDING;

	if (!ax25_t1timer_running(ax25)) {
		ax25_stop_t3timer(ax25);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
	}
}

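/*
 * Prepend the AX.25 address field (source, destination and any digipeater
 * path) to the frame and hand it to ax25_queue_xmit().  The buffer is
 * reallocated first if it does not have enough headroom for the address
 * field.
 */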
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	struct sk_buff *skbn;
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (skb_headroom(skb) < headroom) {
		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			kfree_skb(skb);
			return;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		consume_skb(skb);
		skb = skbn;
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim around dev_queue_xmit which adds the KISS control byte and
 * redirects the frame to the forwarding device if packet forwarding is in
 * operation.
 */
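/*
 * The prepended byte is the KISS command field: 0x00 selects the "data
 * frame" command on KISS port 0, which is what the AX.25 interface
 * drivers expect at the front of every frame.
 */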
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;

	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

	ptr  = skb_push(skb, 1);
	*ptr = 0x00;			/* KISS */

	dev_queue_xmit(skb);
}

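/*
 * Process an acknowledgement carrying sequence number nr.  Returns 1 if it
 * acknowledged outstanding I frames: all of them when nr equals V(S), in
 * which case T1 is stopped and T3 restarted, or some of them when nr
 * differs from V(A), in which case T1 is restarted.  Returns 0 otherwise.
 */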
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
	if (ax25->vs == nr) {
		ax25_frames_acked(ax25, nr);
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		return 1;
	} else {
		if (ax25->va != nr) {
			ax25_frames_acked(ax25, nr);
			ax25_calculate_t1(ax25);
			ax25_start_t1timer(ax25);
			return 1;
		}
	}
	return 0;
}