xref: /OK3568_Linux_fs/external/rkwifibt/drivers/bluetooth_uart_driver/hci_rtk_h5.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  *
3  *  Bluetooth HCI UART driver
4  *
5  *  Copyright (C) 2011-2014  wifi_fae<wifi_fae@realtek.com.tw>
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, write to the Free Software
20  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/init.h>
27 #include <linux/types.h>
28 #include <linux/fcntl.h>
29 #include <linux/interrupt.h>
30 #include <linux/ptrace.h>
31 #include <linux/poll.h>
32 #include <linux/slab.h>
33 #include <linux/tty.h>
34 #include <linux/errno.h>
35 #include <linux/string.h>
36 #include <linux/signal.h>
37 #include <linux/ioctl.h>
38 #include <linux/skbuff.h>
39 #include <linux/bitrev.h>
40 #include <asm/unaligned.h>
41 #include <net/bluetooth/bluetooth.h>
42 #include <net/bluetooth/hci_core.h>
43 #include <linux/version.h>
44 
45 #include "hci_uart.h"
46 
47 #ifdef BTCOEX
48 #include "rtk_coex.h"
49 #endif
50 
51 //#define VERSION "1.0"
52 
53 static int txcrc = 1;
54 //static int hciextn = 1;
55 
56 #define H5_TXWINSIZE	4
57 #define H5_ACK_PKT	0x00
58 #define H5_LE_PKT	    0x0F
59 #define H5_VDRSPEC_PKT	0x0E
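/*
 * H5_TXWINSIZE is the reliable-packet sliding window: at most four
 * reliable packets may be outstanding (sent but not yet ack'ed) at any
 * time, enforced in h5_dequeue().  The three values below are H5
 * channel numbers (0x00 ack, 0x0F link establishment, 0x0E vendor
 * specific) reused locally as pseudo packet types next to HCI_*_PKT.
 */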
60 
61 struct h5_struct {
62 	struct sk_buff_head unack;	/* Unack'ed packets queue */
63 	struct sk_buff_head rel;	/* Reliable packets queue */
64 	struct sk_buff_head unrel;	/* Unreliable packets queue */
65 
66 	unsigned long rx_count;
67 	struct sk_buff *rx_skb;
68 	struct delayed_work	retrans_work;
69 	struct hci_uart		*hu;		/* Parent HCI UART */
70 
71 	enum {
72 		H5_W4_PKT_DELIMITER,
73 		H5_W4_PKT_START,
74 		H5_W4_HDR,
75 		H5_W4_DATA,
76 		H5_W4_CRC
77 	} rx_state;
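	/*
	 * Receive state machine, driven by h5_recv():
	 *   H5_W4_PKT_DELIMITER  wait for a 0xc0 frame delimiter
	 *   H5_W4_PKT_START      skip repeated 0xc0; the first other byte
	 *                        starts a new packet
	 *   H5_W4_HDR            collect the 4-byte H5 header
	 *   H5_W4_DATA           collect the payload
	 *   H5_W4_CRC            collect the 2-byte CRC when header bit
	 *                        0x40 says one is present
	 */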
78 
79 	enum {
80 		H5_ESCSTATE_NOESC,
81 		H5_ESCSTATE_ESC
82 	} rx_esc_state;
83 
84 	u16 message_crc;
85 	u8 use_crc;
86 	u8 rxack;		/* Last packet sent by us that the peer ack'ed */
87 
88 	u8 rxseq_txack;		/* Next rx seq we expect; also the ack we send (rxseq == txack) */
89 	u8 txack_req;		/* Do we need to send ack's to the peer? */
90 	/* Reliable packet sequence number - used to assign seq to each rel pkt. */
91 	u8 msgq_txseq;
92 
93 	/* The spin lock protects seq, ack and ack req */
94 	spinlock_t lock;
95 };
96 
97 /* ---- H5 CRC calculation ---- */
98 
99 /* Table for calculating CRC for polynomial 0x1021, LSB processed first,
100 initial value 0xffff, bits shifted in reverse order. */
101 
102 static const u16 crc_table[] = {
103 	0x0000, 0x1081, 0x2102, 0x3183,
104 	0x4204, 0x5285, 0x6306, 0x7387,
105 	0x8408, 0x9489, 0xa50a, 0xb58b,
106 	0xc60c, 0xd68d, 0xe70e, 0xf78f
107 };
108 
109 /* Initialise the crc calculator */
110 #define H5_CRC_INIT(x) x = 0xffff
111 
112 /*
113    Update crc with next data byte
114 
115    Implementation note
116         The data byte is treated as two nibbles.  The crc is generated
117         in reverse, i.e., bits are fed into the register from the top.
118 */
119 static void h5_crc_update(u16 *crc, u8 d)
120 {
121 	u16 reg = *crc;
122 
123 	reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f];
124 	reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f];
125 
126 	*crc = reg;
127 }
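/*
 * Worked example (illustration only, not from the original source):
 * starting from the initial value 0xffff, feeding one 0x00 byte gives
 * 0x0fff ^ 0xf78f = 0xf870 after the low nibble and 0x0f87 after the
 * high nibble, so the running CRC becomes 0x0f87.  The accumulated
 * value is bit-reversed with bitrev16() before being appended to a
 * frame in h5_prepare_pkt() and before the compare in h5_recv().
 */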
128 
129 /* ---- H5 core ---- */
130 
131 static void h5_slip_msgdelim(struct sk_buff *skb)
132 {
133 	const char pkt_delim = 0xc0;
134 
135 	memcpy(skb_put(skb, 1), &pkt_delim, 1);
136 }
137 
138 static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
139 {
140 	const char esc_c0[2] = { 0xdb, 0xdc };
141 	const char esc_db[2] = { 0xdb, 0xdd };
142 	const char esc_11[2] = { 0xdb, 0xde };
143 	const char esc_13[2] = { 0xdb, 0xdf };
144 
145 	switch (c) {
146 	case 0xc0:
147 		memcpy(skb_put(skb, 2), &esc_c0, 2);
148 		break;
149 	case 0xdb:
150 		memcpy(skb_put(skb, 2), &esc_db, 2);
151 		break;
152 	case 0x11:
153 		memcpy(skb_put(skb, 2), &esc_11, 2);
154 		break;
155 	case 0x13:
156 		memcpy(skb_put(skb, 2), &esc_13, 2);
157 		break;
158 	default:
159 		memcpy(skb_put(skb, 1), &c, 1);
160 	}
161 }
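/*
 * SLIP escaping: 0xc0 is the frame delimiter and 0xdb the escape byte,
 * so both must be escaped inside a frame (0xdb 0xdc and 0xdb 0xdd).
 * 0x11 (XON) and 0x13 (XOFF) are escaped as 0xdb 0xde and 0xdb 0xdf so
 * that payload bytes cannot be mistaken for software flow-control
 * characters on the UART.
 */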
162 
163 static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
164 {
165 	struct h5_struct *h5 = hu->priv;
166 
167 	if (skb->len > 0xFFF) {	//Pkt length must not exceed 4095 (0xFFF) bytes
168 		BT_ERR("Packet too long");
169 		kfree_skb(skb);
170 		return 0;
171 	}
172 
173 	switch (bt_cb(skb)->pkt_type) {
174 	case HCI_ACLDATA_PKT:
175 	case HCI_COMMAND_PKT:
176 		skb_queue_tail(&h5->rel, skb);
177 		break;
178 
179 	case HCI_SCODATA_PKT:
180 		skb_queue_tail(&h5->unrel, skb);
181 		break;
182 	case H5_LE_PKT:
183 	case H5_ACK_PKT:
184 	case H5_VDRSPEC_PKT:
185 		skb_queue_tail(&h5->unrel, skb);	/* 3-wire LinkEstablishment */
186 		break;
187 
188 	default:
189 		BT_ERR("Unknown packet type");
190 		kfree_skb(skb);
191 		break;
192 	}
193 
194 	return 0;
195 }
196 
197 static struct sk_buff *h5_prepare_pkt(struct h5_struct *h5, u8 *data,
198 				      int len, int pkt_type)
199 {
200 	struct sk_buff *nskb;
201 	u8 hdr[4], chan;
202 	u16 H5_CRC_INIT(h5_txmsg_crc);
203 	int rel, i;
204 	u8 tmp;
205 	unsigned long flags;
206 
207 	switch (pkt_type) {
208 	case HCI_ACLDATA_PKT:
209 		chan = 2;	/* 3-wire ACL channel */
210 		rel = 1;	/* reliable channel */
211 		break;
212 	case HCI_COMMAND_PKT:
213 		chan = 1;	/* 3-wire cmd channel */
214 		rel = 1;	/* reliable channel */
215 		break;
216 	case HCI_EVENT_PKT:
217 		chan = 4;	/* 3-wire event channel */
218 		rel = 1;	/* reliable channel */
219 		break;
220 	case HCI_SCODATA_PKT:
221 		chan = 3;	/* 3-wire SCO channel */
222 		rel = 0;	/* unreliable channel */
223 		break;
224 	case H5_LE_PKT:
225 		chan = 15;	/* 3-wire LinkEstablishment channel */
226 		rel = 0;	/* unreliable channel */
227 		break;
228 	case H5_ACK_PKT:
229 		chan = 0;	/* 3-wire ACK channel */
230 		rel = 0;	/* unreliable channel */
231 		break;
232 	case H5_VDRSPEC_PKT:
233 		chan = 14;	/* 3-wire Vendor Specific channel */
234 		rel = 0;	/* unreliable channel */
235 		break;
236 	default:
237 		BT_ERR("Unknown packet type");
238 		return NULL;
239 	}
240 
241 	/* Max len of packet: (original len +4(h5 hdr) +2(crc))*2
242 	   (because bytes 0xc0 and 0xdb are escaped, worst case is
243 	   when the packet is all made of 0xc0 and 0xdb :) )
244 	   + 2 (0xc0 delimiters at start and end). */
245 
246 	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
247 	if (!nskb)
248 		return NULL;
249 
250 	bt_cb(nskb)->pkt_type = pkt_type;
251 
252 	h5_slip_msgdelim(nskb);
253 
254 	spin_lock_irqsave(&h5->lock, flags);
255 	tmp = h5->rxseq_txack;
256 	hdr[0] = h5->rxseq_txack << 3;
257 	h5->txack_req = 0;
258 	spin_unlock_irqrestore(&h5->lock, flags);
259 	BT_DBG("We request packet no %u to card", tmp);
260 
261 	if (rel) {
262 		spin_lock_irqsave(&h5->lock, flags);
263 		tmp = h5->msgq_txseq;
264 		hdr[0] |= 0x80 + h5->msgq_txseq;
265 		h5->msgq_txseq = (h5->msgq_txseq + 1) & 0x07;
266 		spin_unlock_irqrestore(&h5->lock, flags);
267 		BT_DBG("Sending packet with seqno %u", tmp);
268 	}
269 
270 	if (h5->use_crc)
271 		hdr[0] |= 0x40;
272 
273 	hdr[1] = ((len << 4) & 0xff) | chan;
274 	hdr[2] = len >> 4;
275 	hdr[3] = ~(hdr[0] + hdr[1] + hdr[2]);
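	/*
	 * The 4-byte H5 header built above:
	 *   hdr[0]  bits 0-2: tx seq (reliable pkts only), bits 3-5: ack,
	 *           bit 6: CRC present, bit 7: reliable flag
	 *   hdr[1]  bits 0-3: channel id, bits 4-7: payload length bits 0-3
	 *   hdr[2]  payload length bits 4-11
	 *   hdr[3]  one's complement of the sum of hdr[0..2] (mod 256)
	 */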
276 
277 	/* Put H5 header */
278 	for (i = 0; i < 4; i++) {
279 		h5_slip_one_byte(nskb, hdr[i]);
280 
281 		if (h5->use_crc)
282 			h5_crc_update(&h5_txmsg_crc, hdr[i]);
283 	}
284 
285 	/* Put payload */
286 	for (i = 0; i < len; i++) {
287 		h5_slip_one_byte(nskb, data[i]);
288 
289 		if (h5->use_crc)
290 			h5_crc_update(&h5_txmsg_crc, data[i]);
291 	}
292 
293 	/* Put CRC */
294 	if (h5->use_crc) {
295 		h5_txmsg_crc = bitrev16(h5_txmsg_crc);
296 		h5_slip_one_byte(nskb, (u8) ((h5_txmsg_crc >> 8) & 0x00ff));
297 		h5_slip_one_byte(nskb, (u8) (h5_txmsg_crc & 0x00ff));
298 	}
299 
300 	h5_slip_msgdelim(nskb);
301 	return nskb;
302 }
303 
304 /* This is a rewrite of pkt_avail in AH5 */
305 static struct sk_buff *h5_dequeue(struct hci_uart *hu)
306 {
307 	struct h5_struct *h5 = hu->priv;
308 	unsigned long flags;
309 	struct sk_buff *skb;
310 
311 	/* First of all, check for unreliable messages in the queue,
312 	   since they have priority */
313 
314 	if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
315 		struct sk_buff *nskb =
316 		    h5_prepare_pkt(h5, skb->data, skb->len,
317 				   bt_cb(skb)->pkt_type);
318 		if (nskb) {
319 			kfree_skb(skb);
320 			return nskb;
321 		} else {
322 			skb_queue_head(&h5->unrel, skb);
323 			BT_ERR
324 			    ("Could not dequeue pkt because alloc_skb failed");
325 		}
326 	}
327 
328 	/* Now, try to send a reliable pkt. We can only send a
329 	   reliable packet if the number of packets sent but not yet ack'ed
330 	   is < than the winsize */
331 
332 	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
333 
334 	if (h5->unack.qlen < H5_TXWINSIZE
335 	    && (skb = skb_dequeue(&h5->rel)) != NULL) {
336 		struct sk_buff *nskb =
337 		    h5_prepare_pkt(h5, skb->data, skb->len,
338 				   bt_cb(skb)->pkt_type);
339 		if (nskb) {
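			/*
			 * The original skb stays on the unack queue until the
			 * peer acks it; the HZ/4 (~250 ms) timer lets
			 * h5_timed_event() push it back onto the reliable
			 * queue for retransmission if no ack arrives in time.
			 */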
340 			__skb_queue_tail(&h5->unack, skb);
341 			schedule_delayed_work(&h5->retrans_work, HZ / 4);
342 			spin_unlock_irqrestore(&h5->unack.lock, flags);
343 			return nskb;
344 		} else {
345 			skb_queue_head(&h5->rel, skb);
346 			BT_ERR
347 			    ("Could not dequeue pkt because alloc_skb failed");
348 		}
349 	}
350 
351 	spin_unlock_irqrestore(&h5->unack.lock, flags);
352 
353 	/* We could not send a reliable packet, either because there are
354 	   none or because there are too many unack'ed pkts. Did we receive
355 	   any packets we have not acknowledged yet ? */
356 
357 	if (h5->txack_req) {
358 		/* if so, craft an empty ACK pkt and send it on H5 unreliable
359 		   channel 0 */
360 		struct sk_buff *nskb = h5_prepare_pkt(h5, NULL, 0, H5_ACK_PKT);
361 		return nskb;
362 	}
363 
364 	/* We have nothing to send */
365 	return NULL;
366 }
367 
368 static int h5_flush(struct hci_uart *hu)
369 {
370 	BT_DBG("hu %p", hu);
371 	return 0;
372 }
373 
374 /* Remove ack'ed packets */
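/*
 * Walk backwards from the next tx seq number towards the peer's ack to
 * work out how many of the oldest unack'ed packets the ack covers, then
 * drop that many from the head of the unack queue.
 */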
375 static void h5_pkt_cull(struct h5_struct *h5)
376 {
377 	struct sk_buff *skb, *tmp;
378 	unsigned long flags;
379 	int i, pkts_to_be_removed;
380 	u8 seqno;
381 
382 	spin_lock_irqsave(&h5->unack.lock, flags);
383 
384 	pkts_to_be_removed = skb_queue_len(&h5->unack);
385 	seqno = h5->msgq_txseq;
386 
387 	while (pkts_to_be_removed) {
388 		if (h5->rxack == seqno)
389 			break;
390 		pkts_to_be_removed--;
391 		seqno = (seqno - 1) & 0x07;
392 	}
393 
394 	if (h5->rxack != seqno)
395 		BT_ERR("Peer acked invalid packet");
396 
397 	BT_DBG("Removing %u pkts out of %u, up to seqno %u",
398 	       pkts_to_be_removed, skb_queue_len(&h5->unack),
399 	       (seqno - 1) & 0x07);
400 
401 	i = 0;
402 	skb_queue_walk_safe(&h5->unack, skb, tmp) {
403 		if (i >= pkts_to_be_removed)
404 			break;
405 		i++;
406 
407 		__skb_unlink(skb, &h5->unack);
408 		kfree_skb(skb);
409 	}
410 
411 	if (skb_queue_empty(&h5->unack))
412 		cancel_delayed_work(&h5->retrans_work);
413 
414 	spin_unlock_irqrestore(&h5->unack.lock, flags);
415 
416 	if (i != pkts_to_be_removed)
417 		BT_ERR("Removed only %u out of %u pkts", i, pkts_to_be_removed);
418 }
419 
420 /* Handle H5 link-establishment packets. When we
421    detect a "sync" packet, a sign that the BT module has reset,
422    we do nothing :) (yet) */
423 #if 0
424 static void h5_handle_le_pkt(struct hci_uart *hu)
425 {
426 	struct h5_struct *h5 = hu->priv;
427 	u8 conf_pkt[2] = { 0x03, 0xfc };
428 	u8 conf_rsp_pkt[3] = { 0x04, 0x7b, 0x00 };
429 	u8 sync_pkt[2] = { 0x01, 0x7e };
430 	u8 sync_rsp_pkt[2] = { 0x02, 0x7d };
431 
432 	u8 wakeup_pkt[2] = { 0x05, 0xfa };
433 	u8 woken_pkt[2] = { 0x06, 0xf9 };
434 	u8 sleep_pkt[2] = { 0x07, 0x78 };
435 
436 	/* spot "conf" pkts and reply with a "conf rsp" pkt */
437 	if (h5->rx_skb->data[1] >> 4 == 2 && h5->rx_skb->data[2] == 0 &&
438 	    !memcmp(&h5->rx_skb->data[4], conf_pkt, 2)) {
439 		struct sk_buff *nskb = alloc_skb(3, GFP_ATOMIC);
440 
441 		BT_DBG("Found a LE conf pkt");
442 		if (!nskb)
443 			return;
444 
445 		conf_rsp_pkt[2] |= txcrc << 0x4;	//crc check enable, version no = 0; should be made a variable
446 		memcpy(skb_put(nskb, 3), conf_rsp_pkt, 3);
447 		bt_cb(nskb)->pkt_type = H5_LE_PKT;
448 
449 		skb_queue_head(&h5->unrel, nskb);
450 		hci_uart_tx_wakeup(hu);
451 	}
452 	/* spot "conf resp" pkts */
453 	else if (h5->rx_skb->data[1] >> 4 == 2 && h5->rx_skb->data[2] == 0 &&
454 		 !memcmp(&h5->rx_skb->data[4], conf_rsp_pkt, 2)) {
455 		BT_DBG("Found a LE conf resp pkt, device go into active state");
456 		txcrc = (h5->rx_skb->data[6] >> 0x4) & 0x1;
457 	}
458 
459 	/* Spot "sync" pkts. If we find one...disaster! */
460 	else if (h5->rx_skb->data[1] >> 4 == 2 && h5->rx_skb->data[2] == 0 &&
461 		 !memcmp(&h5->rx_skb->data[4], sync_pkt, 2)) {
462 		BT_ERR("Found a LE sync pkt, card has reset");
463 		//DO Something here
464 	}
465 	/* Spot "sync resp" pkts. If we find one...disaster! */
466 	else if (h5->rx_skb->data[1] >> 4 == 2 && h5->rx_skb->data[2] == 0 &&
467 		 !memcmp(&h5->rx_skb->data[4], sync_rsp_pkt, 2)) {
468 		BT_ERR
469 		    ("Found a LE sync resp pkt, device go into initialized state");
470 		//      DO Something here
471 	}
472 	/* Spot "wakeup" pkts. reply woken message when in active mode */
473 	else if (h5->rx_skb->data[1] >> 4 == 2 && h5->rx_skb->data[2] == 0 &&
474 		 !memcmp(&h5->rx_skb->data[4], wakeup_pkt, 2)) {
475 		struct sk_buff *nskb = alloc_skb(2, GFP_ATOMIC);
476 
477 		BT_ERR("Found a LE Wakeup pkt, and reply woken message");
478 		//      DO Something here
479 
480 		memcpy(skb_put(nskb, 2), woken_pkt, 2);
481 		bt_cb(nskb)->pkt_type = H5_LE_PKT;
482 
483 		skb_queue_head(&h5->unrel, nskb);
484 		hci_uart_tx_wakeup(hu);
485 	}
486 	/* Spot "woken" pkts. receive woken message from device */
487 	else if (h5->rx_skb->data[1] >> 4 == 2 && h5->rx_skb->data[2] == 0 &&
488 		 !memcmp(&h5->rx_skb->data[4], woken_pkt, 2)) {
489 		BT_ERR("Found a LE woken pkt from device");
490 		//      DO Something here
491 	}
492 	/* Spot "Sleep" pkts */
493 	else if (h5->rx_skb->data[1] >> 4 == 2 && h5->rx_skb->data[2] == 0 &&
494 		 !memcmp(&h5->rx_skb->data[4],
495 			  sleep_pkt, 2)) {
496 		BT_ERR("Found a LE Sleep pkt");
497 		//      DO Something here
498 	}
499 }
500 #endif
501 
502 static inline void h5_unslip_one_byte(struct h5_struct *h5, unsigned char byte)
503 {
504 	const u8 c0 = 0xc0, db = 0xdb;
505 	const u8 oof1 = 0x11, oof2 = 0x13;
506 
507 	switch (h5->rx_esc_state) {
508 	case H5_ESCSTATE_NOESC:
509 		switch (byte) {
510 		case 0xdb:
511 			h5->rx_esc_state = H5_ESCSTATE_ESC;
512 			break;
513 		default:
514 			memcpy(skb_put(h5->rx_skb, 1), &byte, 1);
515 			if ((h5->rx_skb->data[0] & 0x40) != 0 &&
516 			    h5->rx_state != H5_W4_CRC)
517 				h5_crc_update(&h5->message_crc, byte);
518 			h5->rx_count--;
519 		}
520 		break;
521 
522 	case H5_ESCSTATE_ESC:
523 		switch (byte) {
524 		case 0xdc:
525 			memcpy(skb_put(h5->rx_skb, 1), &c0, 1);
526 			if ((h5->rx_skb->data[0] & 0x40) != 0 &&
527 			    h5->rx_state != H5_W4_CRC)
528 				h5_crc_update(&h5->message_crc, 0xc0);
529 			h5->rx_esc_state = H5_ESCSTATE_NOESC;
530 			h5->rx_count--;
531 			break;
532 
533 		case 0xdd:
534 			memcpy(skb_put(h5->rx_skb, 1), &db, 1);
535 			if ((h5->rx_skb->data[0] & 0x40) != 0 &&
536 			    h5->rx_state != H5_W4_CRC)
537 				h5_crc_update(&h5->message_crc, 0xdb);
538 			h5->rx_esc_state = H5_ESCSTATE_NOESC;
539 			h5->rx_count--;
540 			break;
541 
542 		case 0xde:
543 			memcpy(skb_put(h5->rx_skb, 1), &oof1, 1);
544 			if ((h5->rx_skb->data[0] & 0x40) != 0
545 			    && h5->rx_state != H5_W4_CRC)
546 				h5_crc_update(&h5->message_crc, oof1);
547 			h5->rx_esc_state = H5_ESCSTATE_NOESC;
548 			h5->rx_count--;
549 			break;
550 
551 		case 0xdf:
552 			memcpy(skb_put(h5->rx_skb, 1), &oof2, 1);
553 			if ((h5->rx_skb->data[0] & 0x40) != 0
554 			    && h5->rx_state != H5_W4_CRC)
555 				h5_crc_update(&h5->message_crc, oof2);
556 			h5->rx_esc_state = H5_ESCSTATE_NOESC;
557 			h5->rx_count--;
558 			break;
559 
560 		default:
561 			BT_ERR("Invalid byte %02x after esc byte", byte);
562 			kfree_skb(h5->rx_skb);
563 			h5->rx_skb = NULL;
564 			h5->rx_state = H5_W4_PKT_DELIMITER;
565 			h5->rx_count = 0;
566 		}
567 	}
568 }
569 
570 static void h5_complete_rx_pkt(struct hci_uart *hu)
571 {
572 	struct h5_struct *h5 = hu->priv;
573 	int pass_up;
574 
575 	if (h5->rx_skb->data[0] & 0x80) {	/* reliable pkt */
576 		unsigned long flags;
577 		u8 rxseq;
578 
579 		spin_lock_irqsave(&h5->lock, flags);
580 		rxseq = h5->rxseq_txack;
581 		h5->rxseq_txack++;
582 		h5->rxseq_txack %= 0x8;
583 		h5->txack_req = 1;
584 		spin_unlock_irqrestore(&h5->lock, flags);
585 
586 		BT_DBG("Received seqno %u from card", rxseq);
587 	}
588 
589 	h5->rxack = (h5->rx_skb->data[0] >> 3) & 0x07;
590 	BT_DBG("Request for pkt %u from card", h5->rxack);
591 
592 	h5_pkt_cull(h5);
593 
594 	hci_uart_tx_wakeup(hu);
595 
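	/*
	 * Demultiplex on the H5 channel id in the low nibble of header
	 * byte 1: reliable channel 1 = HCI command, 2 = ACL, 4 = event;
	 * channel 3 (SCO) and 14 (vendor specific) are passed up in any
	 * case, while channel 15 (link establishment) and unknown
	 * channels are consumed here rather than forwarded to the HCI core.
	 */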
596 	if ((h5->rx_skb->data[1] & 0x0f) == 2 && h5->rx_skb->data[0] & 0x80) {
597 		bt_cb(h5->rx_skb)->pkt_type = HCI_ACLDATA_PKT;
598 		pass_up = 1;
599 	} else if ((h5->rx_skb->data[1] & 0x0f) == 4 &&
600 		   h5->rx_skb->data[0] & 0x80) {
601 		bt_cb(h5->rx_skb)->pkt_type = HCI_EVENT_PKT;
602 		pass_up = 1;
603 	} else if ((h5->rx_skb->data[1] & 0x0f) == 3) {
604 		bt_cb(h5->rx_skb)->pkt_type = HCI_SCODATA_PKT;
605 		pass_up = 1;
606 	} else if ((h5->rx_skb->data[1] & 0x0f) == 15 &&
607 		   !(h5->rx_skb->data[0] & 0x80)) {
608 		//h5_handle_le_pkt(hu);//Link Establishment Pkt
609 		pass_up = 0;
610 	} else if ((h5->rx_skb->data[1] & 0x0f) == 1 &&
611 		   h5->rx_skb->data[0] & 0x80) {
612 		bt_cb(h5->rx_skb)->pkt_type = HCI_COMMAND_PKT;
613 		pass_up = 1;
614 	} else if ((h5->rx_skb->data[1] & 0x0f) == 14) {
615 		bt_cb(h5->rx_skb)->pkt_type = H5_VDRSPEC_PKT;
616 		pass_up = 1;
617 	} else
618 		pass_up = 0;
619 
620 	if (!pass_up) {
621 		/* struct hci_event_hdr hdr; */
622 		u8 desc = (h5->rx_skb->data[1] & 0x0f);
623 
624 		if (desc != H5_ACK_PKT && desc != H5_LE_PKT) {
625 			/* if (hciextn) {
626 			 * 	desc |= 0xc0;
627 			 * 	skb_pull(h5->rx_skb, 4);
628 			 * 	memcpy(skb_push(h5->rx_skb, 1), &desc, 1);
629 
630 			 * 	hdr.evt = 0xff;
631 			 * 	hdr.plen = h5->rx_skb->len;
632 			 * 	memcpy(skb_push(h5->rx_skb, HCI_EVENT_HDR_SIZE),
633 			 * 	       &hdr, HCI_EVENT_HDR_SIZE);
634 			 * 	bt_cb(h5->rx_skb)->pkt_type = HCI_EVENT_PKT;
635 
636 			 * 	hci_recv_frame(h5->rx_skb);
637 			 * } else { */
638 				BT_ERR("Packet for unknown channel (%u %s)",
639 				       h5->rx_skb->data[1] & 0x0f,
640 				       h5->rx_skb->data[0] & 0x80 ?
641 				       "reliable" : "unreliable");
642 				kfree_skb(h5->rx_skb);
643 			/* } */
644 		} else
645 			kfree_skb(h5->rx_skb);
646 	} else {
647 			/* Pull out H5 hdr */
648 		skb_pull(h5->rx_skb, 4);
649 
650 #ifdef BTCOEX
651 		if (bt_cb(h5->rx_skb)->pkt_type == HCI_EVENT_PKT)
652 			rtk_btcoex_parse_event(h5->rx_skb->data,
653 					       h5->rx_skb->len);
654 
655 		if (bt_cb(h5->rx_skb)->pkt_type == HCI_ACLDATA_PKT)
656 			rtk_btcoex_parse_l2cap_data_rx(h5->rx_skb->data,
657 						       h5->rx_skb->len);
658 #endif
659 
660 #if HCI_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
661 		hci_recv_frame(h5->rx_skb);
662 #else
663 		hci_recv_frame(hu->hdev, h5->rx_skb);
664 #endif
665 	}
666 
667 	h5->rx_state = H5_W4_PKT_DELIMITER;
668 	h5->rx_skb = NULL;
669 }
670 
671 static u16 bscp_get_crc(struct h5_struct *h5)
672 {
673 	return get_unaligned_be16(&h5->rx_skb->data[h5->rx_skb->len - 2]);
674 }
675 
676 /* Recv data */
677 static int h5_recv(struct hci_uart *hu, void *data, int count)
678 {
679 	struct h5_struct *h5 = hu->priv;
680 	register unsigned char *ptr;
681 	u8 rxseq;
682 	unsigned long flags;
683 
684 	BT_DBG("hu %p count %d rx_state %d rx_count %ld",
685 	       hu, count, h5->rx_state, h5->rx_count);
686 
687 	ptr = data;
688 	while (count) {
689 		if (h5->rx_count) {
690 			if (*ptr == 0xc0) {
691 				BT_ERR("Short H5 packet");
692 				kfree_skb(h5->rx_skb);
693 				h5->rx_state = H5_W4_PKT_START;
694 				h5->rx_count = 0;
695 			} else
696 				h5_unslip_one_byte(h5, *ptr);
697 
698 			ptr++;
699 			count--;
700 			continue;
701 		}
702 
703 		switch (h5->rx_state) {
704 		case H5_W4_HDR:
705 			if ((0xff & (u8) ~
706 			     (h5->rx_skb->data[0] +
707 			      h5->rx_skb->data[1] +
708 			      h5->rx_skb->data[2])) != h5->rx_skb->data[3]) {
709 				BT_ERR("Error in H5 hdr checksum");
710 				kfree_skb(h5->rx_skb);
711 				h5->rx_state = H5_W4_PKT_DELIMITER;
712 				h5->rx_count = 0;
713 				continue;
714 			}
715 			rxseq = h5->rxseq_txack;
716 			if (h5->rx_skb->data[0] & 0x80	/* reliable pkt */
717 			    && (h5->rx_skb->data[0] & 0x07) != rxseq) {
718 				BT_ERR("Out-of-order packet arrived, got %u expected %u",
719 				       h5->rx_skb->data[0] & 0x07, rxseq);
720 
721 				spin_lock_irqsave(&h5->lock, flags);
722 				h5->txack_req = 1;
723 				spin_unlock_irqrestore(&h5->lock, flags);
724 				hci_uart_tx_wakeup(hu);
725 				kfree_skb(h5->rx_skb);
726 				h5->rx_state = H5_W4_PKT_DELIMITER;
727 				h5->rx_count = 0;
728 				continue;
729 			}
730 			h5->rx_state = H5_W4_DATA;
731 			h5->rx_count = (h5->rx_skb->data[1] >> 4) + (h5->rx_skb->data[2] << 4);	/* May be 0 */
732 			continue;
733 
734 		case H5_W4_DATA:
735 			if (h5->rx_skb->data[0] & 0x40) {	/* pkt with crc */
736 				h5->rx_state = H5_W4_CRC;
737 				h5->rx_count = 2;
738 			} else
739 				h5_complete_rx_pkt(hu);
740 			continue;
741 
742 		case H5_W4_CRC:
743 			if (bitrev16(h5->message_crc) != bscp_get_crc(h5)) {
744 				BT_ERR
745 				    ("Checksum failed: computed %04x received %04x",
746 				     bitrev16(h5->message_crc),
747 				     bscp_get_crc(h5));
748 
749 				kfree_skb(h5->rx_skb);
750 				h5->rx_state = H5_W4_PKT_DELIMITER;
751 				h5->rx_count = 0;
752 				continue;
753 			}
754 			skb_trim(h5->rx_skb, h5->rx_skb->len - 2);
755 			h5_complete_rx_pkt(hu);
756 			continue;
757 
758 		case H5_W4_PKT_DELIMITER:
759 			switch (*ptr) {
760 			case 0xc0:
761 				h5->rx_state = H5_W4_PKT_START;
762 				break;
763 			default:
764 				/*BT_ERR("Ignoring byte %02x", *ptr); */
765 				break;
766 			}
767 			ptr++;
768 			count--;
769 			break;
770 
771 		case H5_W4_PKT_START:
772 			switch (*ptr) {
773 			case 0xc0:
774 				ptr++;
775 				count--;
776 				break;
777 
778 			default:
779 				h5->rx_state = H5_W4_HDR;
780 				h5->rx_count = 4;
781 				h5->rx_esc_state = H5_ESCSTATE_NOESC;
782 				H5_CRC_INIT(h5->message_crc);
783 
784 				/* Do not increment ptr or decrement count
785 				 * Allocate packet. Max len of a H5 pkt=
786 				 * 0xFFF (payload) +4 (header) +2 (crc) */
787 
788 				h5->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC);
789 				if (!h5->rx_skb) {
790 					BT_ERR
791 					    ("Can't allocate mem for new packet");
792 					h5->rx_state = H5_W4_PKT_DELIMITER;
793 					h5->rx_count = 0;
794 					return 0;
795 				}
796 				h5->rx_skb->dev = (void *)hu->hdev;
797 				break;
798 			}
799 			break;
800 		}
801 	}
802 	return count;
803 }
804 
805 /* Arrange to retransmit all messages in the relq. */
806 static void h5_timed_event(struct work_struct *work)
807 {
808 	struct h5_struct *h5;
809 	struct hci_uart *hu;
810 	unsigned long flags;
811 	unsigned long flags2;
812 	struct sk_buff *skb;
813 
814 	h5 = container_of(work, struct h5_struct, retrans_work.work);
815 	hu = h5->hu;
816 
817 	BT_INFO("hu %p retransmitting %u pkts", hu, h5->unack.qlen);
818 
819 	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
820 
821 	/* Move the pkt from unack queue to the head of reliable tx queue and
822 	 * roll back the tx seq number
823 	 */
824 	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
825 		spin_lock_irqsave(&h5->lock, flags2);
826 		h5->msgq_txseq = (h5->msgq_txseq - 1) & 0x07;
827 		spin_unlock_irqrestore(&h5->lock, flags2);
828 		skb_queue_head(&h5->rel, skb);
829 	}
830 
831 	spin_unlock_irqrestore(&h5->unack.lock, flags);
832 
833 	hci_uart_tx_wakeup(hu);
834 }
835 
836 static int h5_open(struct hci_uart *hu)
837 {
838 	struct h5_struct *h5;
839 
840 	BT_DBG("hu %p", hu);
841 
842 	BT_INFO("h5_open");
843 	h5 = kzalloc(sizeof(*h5), GFP_ATOMIC);
844 	if (!h5)
845 		return -ENOMEM;
846 
847 	hu->priv = h5;
848 	skb_queue_head_init(&h5->unack);
849 	skb_queue_head_init(&h5->rel);
850 	skb_queue_head_init(&h5->unrel);
851 	spin_lock_init(&h5->lock);
852 
853 	h5->hu = hu;
854 	INIT_DELAYED_WORK(&h5->retrans_work, h5_timed_event);
855 
856 	h5->rx_state = H5_W4_PKT_DELIMITER;
857 
858 	if (txcrc)
859 		h5->use_crc = 1;
860 
861 	return 0;
862 }
863 
864 static int h5_close(struct hci_uart *hu)
865 {
866 	struct h5_struct *h5 = hu->priv;
867 
868 	BT_INFO("h5_close");
869 
870 	cancel_delayed_work_sync(&h5->retrans_work);
871 
872 	hu->priv = NULL;
873 
874 	skb_queue_purge(&h5->unack);
875 	skb_queue_purge(&h5->rel);
876 	skb_queue_purge(&h5->unrel);
877 
878 	kfree(h5);
879 
880 	return 0;
881 }
882 
883 static struct hci_uart_proto h5 = {
884 	.id = HCI_UART_3WIRE,
885 	.open = h5_open,
886 	.close = h5_close,
887 	.enqueue = h5_enqueue,
888 	.dequeue = h5_dequeue,
889 	.recv = h5_recv,
890 	.flush = h5_flush
891 };
892 
893 int h5_init(void)
894 {
895 	int err = hci_uart_register_proto(&h5);
896 
897 	if (!err)
898 		BT_INFO("HCI Realtek H5 protocol initialized");
899 	else
900 		BT_ERR("HCI Realtek H5 protocol registration failed");
901 
902 	return err;
903 }
904 
905 int h5_deinit(void)
906 {
907 	return hci_uart_unregister_proto(&h5);
908 }
909