1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * shdlc Link Layer Control
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2012 Intel Corporation. All rights reserved.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/types.h>
11*4882a593Smuzhiyun #include <linux/sched.h>
12*4882a593Smuzhiyun #include <linux/wait.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/skbuff.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #include "llc.h"
17*4882a593Smuzhiyun
/* Link-establishment states driven by llc_shdlc_sm_work() */
enum shdlc_state {
	SHDLC_DISCONNECTED = 0,		/* no link; all queues get purged */
	SHDLC_CONNECTING = 1,		/* about to (re)send an RSET */
	SHDLC_NEGOTIATING = 2,		/* RSET sent, waiting for UA or RSET */
	SHDLC_HALF_CONNECTED = 3,	/* UA sent, waiting for remote traffic */
	SHDLC_CONNECTED = 4		/* link fully established */
};
25*4882a593Smuzhiyun
/* Per-link SHDLC context; all state is mutated from sm_work under state_mutex */
struct llc_shdlc {
	struct nfc_hci_dev *hdev;
	xmit_to_drv_t xmit_to_drv;	/* sends a complete frame to the driver */
	rcv_to_hci_t rcv_to_hci;	/* delivers I-frame payload up to HCI */

	struct mutex state_mutex;	/* serializes state machine execution */
	enum shdlc_state state;
	int hard_fault;			/* latched fatal error (negative errno), 0 if none */

	wait_queue_head_t *connect_wq;	/* caller's on-stack waitqueue (see llc_shdlc_connect) */
	int connect_tries;		/* RSET attempts so far */
	int connect_result;		/* 1 while pending, then 0 or negative errno */
	struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */

	u8 w; /* window size */
	bool srej_support;

	struct timer_list t1_timer;	/* send ack timeout */
	bool t1_active;

	struct timer_list t2_timer;	/* guard/retransmit timeout */
	bool t2_active;

	int ns; /* next seq num for send */
	int nr; /* next expected seq num for receive */
	int dnr; /* oldest sent unacked seq num */

	struct sk_buff_head rcv_q;	/* received frames awaiting the state machine */

	struct sk_buff_head send_q;	/* frames queued for transmission */
	bool rnr; /* other side is not ready to receive */

	struct sk_buff_head ack_pending_q;	/* sent frames not yet acked by remote */

	struct work_struct sm_work;	/* the single state machine work item */

	int tx_headroom;	/* extra headroom required by the driver */
	int tx_tailroom;	/* extra tailroom required by the driver */

	llc_failure_t llc_failure;	/* callback used to report hard faults */
};
67*4882a593Smuzhiyun
/* Headroom reserved for the LLC control byte(s) prepended to each frame */
#define SHDLC_LLC_HEAD_ROOM	2

#define SHDLC_MAX_WINDOW	4	/* largest tx window we accept */
#define SHDLC_SREJ_SUPPORT	false	/* selective reject not implemented */

/* Control-field layout: the top bits select the frame family */
#define SHDLC_CONTROL_HEAD_MASK	0xe0
#define SHDLC_CONTROL_HEAD_I	0x80
#define SHDLC_CONTROL_HEAD_I2	0xa0
#define SHDLC_CONTROL_HEAD_S	0xc0
#define SHDLC_CONTROL_HEAD_U	0xe0

#define SHDLC_CONTROL_NS_MASK	0x38	/* I frame: send sequence number (bits 3-5) */
#define SHDLC_CONTROL_NR_MASK	0x07	/* I/S frame: receive sequence number */
#define SHDLC_CONTROL_TYPE_MASK	0x18	/* S frame: supervisory type (bits 3-4) */

#define SHDLC_CONTROL_M_MASK	0x1f	/* U frame: modifier (low 5 bits) */
84*4882a593Smuzhiyun
/* S-frame supervisory types (extracted via SHDLC_CONTROL_TYPE_MASK) */
enum sframe_type {
	S_FRAME_RR = 0x00,	/* Receive Ready: acknowledgement */
	S_FRAME_REJ = 0x01,	/* Reject: request retransmission from nr */
	S_FRAME_SREJ = 0x03,	/* Selective Reject (unsupported here) */
	S_FRAME_RNR = 0x02	/* Receive Not Ready: ack + flow off */
};

/* U-frame modifiers (extracted via SHDLC_CONTROL_M_MASK) */
enum uframe_modifier {
	U_FRAME_UA = 0x06,	/* Unnumbered Acknowledgement */
	U_FRAME_RSET = 0x19	/* Reset: link (re)negotiation */
};
96*4882a593Smuzhiyun
#define SHDLC_CONNECT_VALUE_MS	5	/* T3: delay before RSET retry */
/*
 * T1 (standalone-ack delay) scales with the negotiated window size.
 * The parameter is parenthesized so expressions like
 * SHDLC_T1_VALUE_MS(a + b) expand correctly.
 */
#define SHDLC_T1_VALUE_MS(w)	((5 * (w)) / 4)
#define SHDLC_T2_VALUE_MS	300	/* retransmission (guard) timeout */

/* Hex-dump an skb at debug level; compiles away unless debug is enabled */
#define SHDLC_DUMP_SKB(info, skb)				      \
do {								      \
	pr_debug("%s:\n", info);				      \
	print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET,     \
		       16, 1, (skb)->data, (skb)->len, 0);	      \
} while (0)
108*4882a593Smuzhiyun /* checks x < y <= z modulo 8 */
/*
 * Checks x < y <= z modulo 8 (SHDLC sequence numbers live in 0..7).
 * The comparison is already boolean, so no `? true : false` is needed.
 */
static bool llc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
{
	if (x < z)
		return x < y && y <= z;

	/* the (x, z] window wraps around the modulo-8 space */
	return y > x || y <= z;
}
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun /* checks x <= y < z modulo 8 */
/*
 * Checks x <= y < z modulo 8.
 * Redundant `? true : false` removed; the comparisons are already bool.
 */
static bool llc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
{
	if (x <= z)
		return x <= y && y < z;

	/* x > z: the [x, z) window wraps around the modulo-8 space */
	return y >= x || y < z;
}
125*4882a593Smuzhiyun
llc_shdlc_alloc_skb(struct llc_shdlc * shdlc,int payload_len)126*4882a593Smuzhiyun static struct sk_buff *llc_shdlc_alloc_skb(struct llc_shdlc *shdlc,
127*4882a593Smuzhiyun int payload_len)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun struct sk_buff *skb;
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM +
132*4882a593Smuzhiyun shdlc->tx_tailroom + payload_len, GFP_KERNEL);
133*4882a593Smuzhiyun if (skb)
134*4882a593Smuzhiyun skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM);
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun return skb;
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun /* immediately sends an S frame. */
llc_shdlc_send_s_frame(struct llc_shdlc * shdlc,enum sframe_type sframe_type,int nr)140*4882a593Smuzhiyun static int llc_shdlc_send_s_frame(struct llc_shdlc *shdlc,
141*4882a593Smuzhiyun enum sframe_type sframe_type, int nr)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun int r;
144*4882a593Smuzhiyun struct sk_buff *skb;
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun skb = llc_shdlc_alloc_skb(shdlc, 0);
149*4882a593Smuzhiyun if (skb == NULL)
150*4882a593Smuzhiyun return -ENOMEM;
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun r = shdlc->xmit_to_drv(shdlc->hdev, skb);
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun kfree_skb(skb);
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun return r;
159*4882a593Smuzhiyun }
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun /* immediately sends an U frame. skb may contain optional payload */
llc_shdlc_send_u_frame(struct llc_shdlc * shdlc,struct sk_buff * skb,enum uframe_modifier uframe_modifier)162*4882a593Smuzhiyun static int llc_shdlc_send_u_frame(struct llc_shdlc *shdlc,
163*4882a593Smuzhiyun struct sk_buff *skb,
164*4882a593Smuzhiyun enum uframe_modifier uframe_modifier)
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun int r;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun pr_debug("uframe_modifier=%d\n", uframe_modifier);
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun r = shdlc->xmit_to_drv(shdlc->hdev, skb);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun kfree_skb(skb);
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun return r;
177*4882a593Smuzhiyun }
178*4882a593Smuzhiyun
/*
 * Free ack_pending frames until y_nr - 1, and reset t2 according to
 * the remaining oldest ack_pending frame sent time
 */
static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
{
	struct sk_buff *skb;
	int dnr = shdlc->dnr;	/* MUST initially be < y_nr */

	pr_debug("release ack pending up to frame %d excluded\n", y_nr);

	/* drop every frame the remote has now acknowledged */
	while (dnr != y_nr) {
		pr_debug("release ack pending frame %d\n", dnr);

		skb = skb_dequeue(&shdlc->ack_pending_q);
		kfree_skb(skb);

		dnr = (dnr + 1) % 8;	/* sequence numbers wrap modulo 8 */
	}

	if (skb_queue_empty(&shdlc->ack_pending_q)) {
		/* nothing left unacked: the T2 guard timer can stop */
		if (shdlc->t2_active) {
			del_timer_sync(&shdlc->t2_timer);
			shdlc->t2_active = false;

			pr_debug
			    ("All sent frames acked. Stopped T2(retransmit)\n");
		}
	} else {
		/*
		 * Re-arm T2 relative to the send time of the oldest still
		 * unacked frame; the jiffies timestamp was stored in skb->cb
		 * by llc_shdlc_handle_send_queue().
		 */
		skb = skb_peek(&shdlc->ack_pending_q);

		mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
			  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
		shdlc->t2_active = true;

		pr_debug
		    ("Start T2(retransmit) for remaining unacked sent frames\n");
	}
}
218*4882a593Smuzhiyun
/*
 * Receive validated frames from lower layer. skb contains HCI payload only.
 * Handle according to algorithm at spec:10.8.2
 */
static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc,
				  struct sk_buff *skb, int ns, int nr)
{
	int x_ns = ns;	/* sequence number of the received frame */
	int y_nr = nr;	/* next frame number the remote expects from us */

	pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr);

	if (shdlc->state != SHDLC_CONNECTED)
		goto exit;

	/* out-of-sequence frame: ask remote to retransmit from our nr */
	if (x_ns != shdlc->nr) {
		llc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
		goto exit;
	}

	/* arm T1 so a standalone ack goes out if we send nothing soon */
	if (shdlc->t1_active == false) {
		shdlc->t1_active = true;
		mod_timer(&shdlc->t1_timer, jiffies +
			  msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
		pr_debug("(re)Start T1(send ack)\n");
	}

	/* non-empty payload goes up to HCI, which takes ownership of skb */
	if (skb->len) {
		shdlc->rcv_to_hci(shdlc->hdev, skb);
		skb = NULL;
	}

	shdlc->nr = (shdlc->nr + 1) % 8;

	/* the piggybacked nr also acknowledges our frames up to y_nr - 1 */
	if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
		llc_shdlc_reset_t2(shdlc, y_nr);

		shdlc->dnr = y_nr;
	}

exit:
	kfree_skb(skb);	/* no-op when ownership was passed to HCI above */
}
262*4882a593Smuzhiyun
llc_shdlc_rcv_ack(struct llc_shdlc * shdlc,int y_nr)263*4882a593Smuzhiyun static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun pr_debug("remote acked up to frame %d excluded\n", y_nr);
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
268*4882a593Smuzhiyun llc_shdlc_reset_t2(shdlc, y_nr);
269*4882a593Smuzhiyun shdlc->dnr = y_nr;
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun }
272*4882a593Smuzhiyun
llc_shdlc_requeue_ack_pending(struct llc_shdlc * shdlc)273*4882a593Smuzhiyun static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun struct sk_buff *skb;
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun pr_debug("ns reset to %d\n", shdlc->dnr);
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
280*4882a593Smuzhiyun skb_pull(skb, 1); /* remove control field */
281*4882a593Smuzhiyun skb_queue_head(&shdlc->send_q, skb);
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun shdlc->ns = shdlc->dnr;
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun
/* Handle a REJ: the remote requests retransmission starting at frame y_nr */
static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
{
	struct sk_buff *skb;

	pr_debug("remote asks retransmission from frame %d\n", y_nr);

	/* act only if y_nr lies inside the current unacked window */
	if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
		if (shdlc->t2_active) {
			del_timer_sync(&shdlc->t2_timer);
			shdlc->t2_active = false;
			pr_debug("Stopped T2(retransmit)\n");
		}

		/* advance dnr toward y_nr, freeing frames along the way */
		if (shdlc->dnr != y_nr) {
			while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
				skb = skb_dequeue(&shdlc->ack_pending_q);
				kfree_skb(skb);
			}
		}

		/* remaining unacked frames go back to send_q for resend */
		llc_shdlc_requeue_ack_pending(shdlc);
	}
}
309*4882a593Smuzhiyun
/* See spec RR:10.8.3 REJ:10.8.4 */
static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc,
				  enum sframe_type s_frame_type, int nr)
{
	struct sk_buff *skb;

	if (shdlc->state != SHDLC_CONNECTED)
		return;

	switch (s_frame_type) {
	case S_FRAME_RR:
		llc_shdlc_rcv_ack(shdlc, nr);
		if (shdlc->rnr == true) {	/* see SHDLC 10.7.7 */
			shdlc->rnr = false;
			/*
			 * Remote is ready again; if we have nothing queued,
			 * queue an empty I frame so traffic resumes.
			 */
			if (shdlc->send_q.qlen == 0) {
				skb = llc_shdlc_alloc_skb(shdlc, 0);
				if (skb)
					skb_queue_tail(&shdlc->send_q, skb);
			}
		}
		break;
	case S_FRAME_REJ:
		llc_shdlc_rcv_rej(shdlc, nr);
		break;
	case S_FRAME_RNR:
		/* remote acks our frames but asks us to pause sending */
		llc_shdlc_rcv_ack(shdlc, nr);
		shdlc->rnr = true;
		break;
	default:
		/* S_FRAME_SREJ and anything else: silently ignored */
		break;
	}
}
342*4882a593Smuzhiyun
llc_shdlc_connect_complete(struct llc_shdlc * shdlc,int r)343*4882a593Smuzhiyun static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
344*4882a593Smuzhiyun {
345*4882a593Smuzhiyun pr_debug("result=%d\n", r);
346*4882a593Smuzhiyun
347*4882a593Smuzhiyun del_timer_sync(&shdlc->connect_timer);
348*4882a593Smuzhiyun
349*4882a593Smuzhiyun if (r == 0) {
350*4882a593Smuzhiyun shdlc->ns = 0;
351*4882a593Smuzhiyun shdlc->nr = 0;
352*4882a593Smuzhiyun shdlc->dnr = 0;
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun shdlc->state = SHDLC_HALF_CONNECTED;
355*4882a593Smuzhiyun } else {
356*4882a593Smuzhiyun shdlc->state = SHDLC_DISCONNECTED;
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun
359*4882a593Smuzhiyun shdlc->connect_result = r;
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun wake_up(shdlc->connect_wq);
362*4882a593Smuzhiyun }
363*4882a593Smuzhiyun
llc_shdlc_connect_initiate(struct llc_shdlc * shdlc)364*4882a593Smuzhiyun static int llc_shdlc_connect_initiate(struct llc_shdlc *shdlc)
365*4882a593Smuzhiyun {
366*4882a593Smuzhiyun struct sk_buff *skb;
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun pr_debug("\n");
369*4882a593Smuzhiyun
370*4882a593Smuzhiyun skb = llc_shdlc_alloc_skb(shdlc, 2);
371*4882a593Smuzhiyun if (skb == NULL)
372*4882a593Smuzhiyun return -ENOMEM;
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun skb_put_u8(skb, SHDLC_MAX_WINDOW);
375*4882a593Smuzhiyun skb_put_u8(skb, SHDLC_SREJ_SUPPORT ? 1 : 0);
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun
llc_shdlc_connect_send_ua(struct llc_shdlc * shdlc)380*4882a593Smuzhiyun static int llc_shdlc_connect_send_ua(struct llc_shdlc *shdlc)
381*4882a593Smuzhiyun {
382*4882a593Smuzhiyun struct sk_buff *skb;
383*4882a593Smuzhiyun
384*4882a593Smuzhiyun pr_debug("\n");
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun skb = llc_shdlc_alloc_skb(shdlc, 0);
387*4882a593Smuzhiyun if (skb == NULL)
388*4882a593Smuzhiyun return -ENOMEM;
389*4882a593Smuzhiyun
390*4882a593Smuzhiyun return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
391*4882a593Smuzhiyun }
392*4882a593Smuzhiyun
/*
 * Handle a U frame (link management). Always consumes skb.
 * RSET may carry up to two parameter bytes: window size and srej flag.
 */
static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
				  struct sk_buff *skb,
				  enum uframe_modifier u_frame_modifier)
{
	u8 w = SHDLC_MAX_WINDOW;		/* defaults if RSET omits params */
	bool srej_support = SHDLC_SREJ_SUPPORT;
	int r;

	pr_debug("u_frame_modifier=%d\n", u_frame_modifier);

	switch (u_frame_modifier) {
	case U_FRAME_RSET:
		switch (shdlc->state) {
		case SHDLC_NEGOTIATING:
		case SHDLC_CONNECTING:
			/*
			 * We sent RSET, but chip wants to negotiate or we
			 * got RSET before we managed to send out ours.
			 */
			if (skb->len > 0)
				w = skb->data[0];	/* proposed window size */

			if (skb->len > 1)
				srej_support = skb->data[1] & 0x01 ? true :
					       false;

			/* accept only parameters we can actually honor */
			if ((w <= SHDLC_MAX_WINDOW) &&
			    (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
				shdlc->w = w;
				shdlc->srej_support = srej_support;
				r = llc_shdlc_connect_send_ua(shdlc);
				llc_shdlc_connect_complete(shdlc, r);
			}
			break;
		case SHDLC_HALF_CONNECTED:
			/*
			 * Chip resent RSET due to its timeout - Ignore it
			 * as we already sent UA.
			 */
			break;
		case SHDLC_CONNECTED:
			/*
			 * Chip wants to reset link. This is unexpected and
			 * unsupported.
			 */
			shdlc->hard_fault = -ECONNRESET;
			break;
		default:
			break;
		}
		break;
	case U_FRAME_UA:
		/* UA answers our RSET: negotiation succeeded */
		if ((shdlc->state == SHDLC_CONNECTING &&
		     shdlc->connect_tries > 0) ||
		    (shdlc->state == SHDLC_NEGOTIATING)) {
			llc_shdlc_connect_complete(shdlc, 0);
			shdlc->state = SHDLC_CONNECTED;
		}
		break;
	default:
		break;
	}

	kfree_skb(skb);
}
458*4882a593Smuzhiyun
/*
 * Drain rcv_q, dispatching each frame to the I/S/U handler selected by
 * the control byte. skb ownership per branch is noted inline.
 */
static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc)
{
	struct sk_buff *skb;
	u8 control;
	int nr;
	int ns;
	enum sframe_type s_frame_type;
	enum uframe_modifier u_frame_modifier;

	if (shdlc->rcv_q.qlen)
		pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);

	while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
		control = skb->data[0];
		skb_pull(skb, 1);	/* strip the control byte */
		switch (control & SHDLC_CONTROL_HEAD_MASK) {
		case SHDLC_CONTROL_HEAD_I:
		case SHDLC_CONTROL_HEAD_I2:
			/* remote traffic promotes a half-open link */
			if (shdlc->state == SHDLC_HALF_CONNECTED)
				shdlc->state = SHDLC_CONNECTED;

			ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			/* rcv_i_frame consumes or frees skb */
			llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
			break;
		case SHDLC_CONTROL_HEAD_S:
			if (shdlc->state == SHDLC_HALF_CONNECTED)
				shdlc->state = SHDLC_CONNECTED;

			s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
			kfree_skb(skb);	/* S frames carry no payload */
			break;
		case SHDLC_CONTROL_HEAD_U:
			u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
			/* rcv_u_frame frees skb */
			llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
			break;
		default:
			pr_err("UNKNOWN Control=%d\n", control);
			kfree_skb(skb);
			break;
		}
	}
}
504*4882a593Smuzhiyun
/*
 * Number of sent-but-unacked frames, i.e. how much of the transmit
 * window is currently in use (sequence numbers wrap modulo 8).
 */
static int llc_shdlc_w_used(int ns, int dnr)
{
	return (dnr <= ns) ? ns - dnr : 8 - dnr + ns;
}
516*4882a593Smuzhiyun
/* Send frames according to algorithm at spec:10.8.1 */
static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
{
	struct sk_buff *skb;
	int r;
	unsigned long time_sent;

	if (shdlc->send_q.qlen)
		pr_debug
		    ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
		     shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
		     shdlc->rnr == false ? "false" : "true",
		     shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
		     shdlc->ack_pending_q.qlen);

	/* send while there is data, window room, and the remote is ready */
	while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
	       (shdlc->rnr == false)) {

		/* outgoing I frame piggybacks our ack: standalone T1 ack moot */
		if (shdlc->t1_active) {
			del_timer_sync(&shdlc->t1_timer);
			shdlc->t1_active = false;
			pr_debug("Stopped T1(send ack)\n");
		}

		skb = skb_dequeue(&shdlc->send_q);

		/* prepend control byte: I frame with ns and piggybacked nr */
		*(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I |
					  (shdlc->ns << 3) | shdlc->nr;

		pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
			 shdlc->nr);
		SHDLC_DUMP_SKB("shdlc frame written", skb);

		r = shdlc->xmit_to_drv(shdlc->hdev, skb);
		if (r < 0) {
			shdlc->hard_fault = r;
			break;
		}

		shdlc->ns = (shdlc->ns + 1) % 8;

		/* stash send time in cb so llc_shdlc_reset_t2() can re-arm T2 */
		time_sent = jiffies;
		*(unsigned long *)skb->cb = time_sent;

		skb_queue_tail(&shdlc->ack_pending_q, skb);

		if (shdlc->t2_active == false) {
			shdlc->t2_active = true;
			mod_timer(&shdlc->t2_timer, time_sent +
				  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
			pr_debug("Started T2 (retransmit)\n");
		}
	}
}
571*4882a593Smuzhiyun
/*
 * T3 (connect) timer expiry: just kick the state machine; sm_work
 * detects the expiry via timer_pending() and retries the connection.
 */
static void llc_shdlc_connect_timeout(struct timer_list *t)
{
	struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer);

	pr_debug("\n");

	schedule_work(&shdlc->sm_work);
}
580*4882a593Smuzhiyun
/*
 * T1 (ack) timer expiry: schedule sm_work, which will notice the expired
 * timer and emit a standalone RR acknowledgement.
 */
static void llc_shdlc_t1_timeout(struct timer_list *t)
{
	struct llc_shdlc *shdlc = from_timer(shdlc, t, t1_timer);

	pr_debug("SoftIRQ: need to send ack\n");

	schedule_work(&shdlc->sm_work);
}
589*4882a593Smuzhiyun
/*
 * T2 (retransmit) timer expiry: schedule sm_work, which will requeue and
 * resend all unacked frames.
 */
static void llc_shdlc_t2_timeout(struct timer_list *t)
{
	struct llc_shdlc *shdlc = from_timer(shdlc, t, t2_timer);

	pr_debug("SoftIRQ: need to retransmit\n");

	schedule_work(&shdlc->sm_work);
}
598*4882a593Smuzhiyun
/*
 * The shdlc state machine. All protocol processing runs here, serialized
 * by state_mutex; timers and frame reception merely schedule this work.
 */
static void llc_shdlc_sm_work(struct work_struct *work)
{
	struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
	int r;

	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	switch (shdlc->state) {
	case SHDLC_DISCONNECTED:
		/* dead link: drop everything still queued */
		skb_queue_purge(&shdlc->rcv_q);
		skb_queue_purge(&shdlc->send_q);
		skb_queue_purge(&shdlc->ack_pending_q);
		break;
	case SHDLC_CONNECTING:
		if (shdlc->hard_fault) {
			llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
			break;
		}

		/* up to 5 RSET attempts before giving up with -ETIME */
		if (shdlc->connect_tries++ < 5)
			r = llc_shdlc_connect_initiate(shdlc);
		else
			r = -ETIME;
		if (r < 0) {
			llc_shdlc_connect_complete(shdlc, r);
		} else {
			/* arm T3 to bound the wait for the remote's answer */
			mod_timer(&shdlc->connect_timer, jiffies +
				  msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));

			shdlc->state = SHDLC_NEGOTIATING;
		}
		break;
	case SHDLC_NEGOTIATING:
		/* T3 expired without an answer: go retry the RSET */
		if (timer_pending(&shdlc->connect_timer) == 0) {
			shdlc->state = SHDLC_CONNECTING;
			schedule_work(&shdlc->sm_work);
		}

		llc_shdlc_handle_rcv_queue(shdlc);

		if (shdlc->hard_fault) {
			llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
			break;
		}
		break;
	case SHDLC_HALF_CONNECTED:
	case SHDLC_CONNECTED:
		llc_shdlc_handle_rcv_queue(shdlc);
		llc_shdlc_handle_send_queue(shdlc);

		/* T1 expired: acknowledge with a standalone RR frame */
		if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
			pr_debug
			    ("Handle T1(send ack) elapsed (T1 now inactive)\n");

			shdlc->t1_active = false;
			r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
						   shdlc->nr);
			if (r < 0)
				shdlc->hard_fault = r;
		}

		/* T2 expired: retransmit every unacked frame */
		if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
			pr_debug
			    ("Handle T2(retransmit) elapsed (T2 inactive)\n");

			shdlc->t2_active = false;

			llc_shdlc_requeue_ack_pending(shdlc);
			llc_shdlc_handle_send_queue(shdlc);
		}

		if (shdlc->hard_fault)
			shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
		break;
	default:
		break;
	}
	mutex_unlock(&shdlc->state_mutex);
}
680*4882a593Smuzhiyun
/*
 * Called from syscall context to establish shdlc link. Sleeps until
 * link is ready or failure.
 */
static int llc_shdlc_connect(struct llc_shdlc *shdlc)
{
	/* on-stack waitqueue is safe: we don't return before the wakeup */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);

	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	shdlc->state = SHDLC_CONNECTING;
	shdlc->connect_wq = &connect_wq;
	shdlc->connect_tries = 0;
	shdlc->connect_result = 1;	/* 1 == attempt still pending */

	mutex_unlock(&shdlc->state_mutex);

	schedule_work(&shdlc->sm_work);

	/* llc_shdlc_connect_complete() sets the result and wakes us up */
	wait_event(connect_wq, shdlc->connect_result != 1);

	return shdlc->connect_result;
}
706*4882a593Smuzhiyun
/*
 * Tear the shdlc link down: mark the state DISCONNECTED under the state
 * mutex, then poke the state machine worker so it observes the new state.
 */
static void llc_shdlc_disconnect(struct llc_shdlc *shdlc)
{
	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	shdlc->state = SHDLC_DISCONNECTED;

	mutex_unlock(&shdlc->state_mutex);

	schedule_work(&shdlc->sm_work);
}
719*4882a593Smuzhiyun
720*4882a593Smuzhiyun /*
721*4882a593Smuzhiyun * Receive an incoming shdlc frame. Frame has already been crc-validated.
722*4882a593Smuzhiyun * skb contains only LLC header and payload.
723*4882a593Smuzhiyun * If skb == NULL, it is a notification that the link below is dead.
724*4882a593Smuzhiyun */
llc_shdlc_recv_frame(struct llc_shdlc * shdlc,struct sk_buff * skb)725*4882a593Smuzhiyun static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb)
726*4882a593Smuzhiyun {
727*4882a593Smuzhiyun if (skb == NULL) {
728*4882a593Smuzhiyun pr_err("NULL Frame -> link is dead\n");
729*4882a593Smuzhiyun shdlc->hard_fault = -EREMOTEIO;
730*4882a593Smuzhiyun } else {
731*4882a593Smuzhiyun SHDLC_DUMP_SKB("incoming frame", skb);
732*4882a593Smuzhiyun skb_queue_tail(&shdlc->rcv_q, skb);
733*4882a593Smuzhiyun }
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun schedule_work(&shdlc->sm_work);
736*4882a593Smuzhiyun }
737*4882a593Smuzhiyun
/*
 * Allocate and initialize an shdlc LLC instance (nfc_llc_ops .init hook).
 *
 * Reports the LLC header room this layer needs through *rx_headroom /
 * *rx_tailroom, stores the driver/HCI callbacks, and arms (but does not
 * start) the connect/T1/T2 timers and the state machine work item.
 *
 * Returns the opaque llc_shdlc pointer, or NULL on allocation failure.
 */
static void *llc_shdlc_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
			    rcv_to_hci_t rcv_to_hci, int tx_headroom,
			    int tx_tailroom, int *rx_headroom, int *rx_tailroom,
			    llc_failure_t llc_failure)
{
	struct llc_shdlc *shdlc;

	*rx_headroom = SHDLC_LLC_HEAD_ROOM;
	*rx_tailroom = 0;

	/* sizeof(*shdlc) keeps the size tied to the variable's type. */
	shdlc = kzalloc(sizeof(*shdlc), GFP_KERNEL);
	if (shdlc == NULL)
		return NULL;

	mutex_init(&shdlc->state_mutex);
	shdlc->state = SHDLC_DISCONNECTED;

	timer_setup(&shdlc->connect_timer, llc_shdlc_connect_timeout, 0);
	timer_setup(&shdlc->t1_timer, llc_shdlc_t1_timeout, 0);
	timer_setup(&shdlc->t2_timer, llc_shdlc_t2_timeout, 0);

	shdlc->w = SHDLC_MAX_WINDOW;
	shdlc->srej_support = SHDLC_SREJ_SUPPORT;

	skb_queue_head_init(&shdlc->rcv_q);
	skb_queue_head_init(&shdlc->send_q);
	skb_queue_head_init(&shdlc->ack_pending_q);

	INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work);

	shdlc->hdev = hdev;
	shdlc->xmit_to_drv = xmit_to_drv;
	shdlc->rcv_to_hci = rcv_to_hci;
	shdlc->tx_headroom = tx_headroom;
	shdlc->tx_tailroom = tx_tailroom;
	shdlc->llc_failure = llc_failure;

	return shdlc;
}
777*4882a593Smuzhiyun
llc_shdlc_deinit(struct nfc_llc * llc)778*4882a593Smuzhiyun static void llc_shdlc_deinit(struct nfc_llc *llc)
779*4882a593Smuzhiyun {
780*4882a593Smuzhiyun struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun skb_queue_purge(&shdlc->rcv_q);
783*4882a593Smuzhiyun skb_queue_purge(&shdlc->send_q);
784*4882a593Smuzhiyun skb_queue_purge(&shdlc->ack_pending_q);
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun kfree(shdlc);
787*4882a593Smuzhiyun }
788*4882a593Smuzhiyun
/* nfc_llc_ops .start hook: bring the shdlc link up synchronously. */
static int llc_shdlc_start(struct nfc_llc *llc)
{
	return llc_shdlc_connect(nfc_llc_get_data(llc));
}
795*4882a593Smuzhiyun
/* nfc_llc_ops .stop hook: request link teardown; always succeeds. */
static int llc_shdlc_stop(struct nfc_llc *llc)
{
	llc_shdlc_disconnect(nfc_llc_get_data(llc));

	return 0;
}
804*4882a593Smuzhiyun
/* nfc_llc_ops .rcv_from_drv hook: hand a driver frame to the shdlc layer. */
static void llc_shdlc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
{
	llc_shdlc_recv_frame(nfc_llc_get_data(llc), skb);
}
811*4882a593Smuzhiyun
llc_shdlc_xmit_from_hci(struct nfc_llc * llc,struct sk_buff * skb)812*4882a593Smuzhiyun static int llc_shdlc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
813*4882a593Smuzhiyun {
814*4882a593Smuzhiyun struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
815*4882a593Smuzhiyun
816*4882a593Smuzhiyun skb_queue_tail(&shdlc->send_q, skb);
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun schedule_work(&shdlc->sm_work);
819*4882a593Smuzhiyun
820*4882a593Smuzhiyun return 0;
821*4882a593Smuzhiyun }
822*4882a593Smuzhiyun
/*
 * Ops vtable handed to the NFC LLC core; the core drives the shdlc
 * layer exclusively through these callbacks.
 */
static struct nfc_llc_ops llc_shdlc_ops = {
	.init = llc_shdlc_init,
	.deinit = llc_shdlc_deinit,
	.start = llc_shdlc_start,
	.stop = llc_shdlc_stop,
	.rcv_from_drv = llc_shdlc_rcv_from_drv,
	.xmit_from_hci = llc_shdlc_xmit_from_hci,
};
831*4882a593Smuzhiyun
/*
 * Register the shdlc LLC with the NFC LLC core under LLC_SHDLC_NAME;
 * propagates nfc_llc_register()'s result.
 */
int nfc_llc_shdlc_register(void)
{
	return nfc_llc_register(LLC_SHDLC_NAME, &llc_shdlc_ops);
}
836