xref: /OK3568_Linux_fs/kernel/drivers/net/usb/GobiNet/GobiUSBNet.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*===========================================================================
2 FILE:
3    GobiUSBNet.c
4 
5 DESCRIPTION:
6    Qualcomm USB Network device for Gobi 3000
7 
8 FUNCTIONS:
9    GobiNetSuspend
10    GobiNetResume
11    GobiNetDriverBind
12    GobiNetDriverUnbind
13    GobiUSBNetURBCallback
14    GobiUSBNetTXTimeout
15    GobiUSBNetAutoPMThread
16    GobiUSBNetStartXmit
17    GobiUSBNetOpen
18    GobiUSBNetStop
19    GobiUSBNetProbe
20    GobiUSBNetModInit
21    GobiUSBNetModExit
22 
23 Copyright (c) 2011, Code Aurora Forum. All rights reserved.
24 
25 Redistribution and use in source and binary forms, with or without
26 modification, are permitted provided that the following conditions are met:
27     * Redistributions of source code must retain the above copyright
28       notice, this list of conditions and the following disclaimer.
29     * Redistributions in binary form must reproduce the above copyright
30       notice, this list of conditions and the following disclaimer in the
31       documentation and/or other materials provided with the distribution.
32     * Neither the name of Code Aurora Forum nor
33       the names of its contributors may be used to endorse or promote
34       products derived from this software without specific prior written
35       permission.
36 
37 
38 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
39 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
42 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
43 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
44 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
45 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
46 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
47 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
48 POSSIBILITY OF SUCH DAMAGE.
49 ===========================================================================*/
50 
51 //---------------------------------------------------------------------------
52 // Include Files
53 //---------------------------------------------------------------------------
54 
55 #include <linux/module.h>
56 #include <linux/netdevice.h>
57 #include <linux/etherdevice.h>
58 #include <linux/kernel.h>
59 #include <linux/ethtool.h>
60 #include <linux/version.h>
61 
62 #include <net/arp.h>
63 #include <net/ip.h>
64 #include <net/ipv6.h>
65 
66 #if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0) //8b094cd03b4a3793220d8d8d86a173bfea8c285b
67 #include <linux/timekeeping.h>
68 #else
69 #define timespec64  timespec
70 #define ktime_get_ts64 ktime_get_ts
71 #define timespec64_sub timespec_sub
72 #endif
73 
74 #include "Structs.h"
75 #include "QMIDevice.h"
76 #include "QMI.h"
77 
78 #ifndef ETH_P_MAP
79 #define ETH_P_MAP 0xDA1A
80 #endif
81 
82 #if (ETH_P_MAP == 0x00F9)
83 #undef ETH_P_MAP
84 #define ETH_P_MAP 0xDA1A
85 #endif
86 
87 //-----------------------------------------------------------------------------
88 // Definitions
89 //-----------------------------------------------------------------------------
90 
91 // Version Information
92 // Adding a new module or feature increases the major version; fixing a bug increases the minor version.
93 #define VERSION_NUMBER "V1.6.2.14"
94 #define DRIVER_VERSION "Quectel_Linux&Android_GobiNet_Driver_"VERSION_NUMBER
95 #define DRIVER_AUTHOR "Qualcomm Innovation Center"
96 #define DRIVER_DESC "GobiNet"
97 static const char driver_name[] = "GobiNet";
98 
99 // Debug flag
100 int quec_debug = 0;
101 
102 // Allow user interrupts
103 //int interruptible = 1;
104 
105 // Number of IP packets which may be queued up for transmit
106 static int txQueueLength = 100;
107 
108 // Class should be created during module init, so needs to be global
109 static struct class * gpClass;
110 
111 static const unsigned char ec20_mac[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00};
112 static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00};
113 static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00};
114 //static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
115 
116 // set up a data call with "AT$QCRMCALL=1,1"
117 static uint __read_mostly qcrmcall_mode = 0;
118 module_param( qcrmcall_mode, uint, S_IRUGO | S_IWUSR );
119 
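/* Strip the Ethernet header from an outgoing skb so only the raw IPv4/IPv6
 * packet is passed on to the modem; returns NULL (caller drops the frame)
 * for runt frames or non-IP protocols. */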
120 static struct sk_buff * ether_to_ip_fixup(struct net_device *dev, struct sk_buff *skb) {
121 	const struct ethhdr *ehdr;
122 
123 	skb_reset_mac_header(skb);
124 	ehdr = eth_hdr(skb);
125 
126 	if (ehdr->h_proto == htons(ETH_P_IP)) {
127 		if (unlikely(skb->len <= (sizeof(struct ethhdr) + sizeof(struct iphdr)))) {
128 			goto drop_skb;
129 		}
130 	}
131 	else if (ehdr->h_proto == htons(ETH_P_IPV6)) {
132 		if (unlikely(skb->len <= (sizeof(struct ethhdr) + sizeof(struct ipv6hdr)))) {
133 			goto drop_skb;
134 		}
135 	}
136 	else {
137 		DBG("%s skb h_proto is %04x\n", dev->name, ntohs(ehdr->h_proto));
138 		goto drop_skb;
139 	}
140 
141 	if (unlikely(skb_pull(skb, ETH_HLEN)))
142 		return skb;
143 
144 drop_skb:
145 	return NULL;
146 }
147 
148 //#define QUECTEL_REMOVE_TX_ZLP
149 #define USB_CDC_SET_REMOVE_TX_ZLP_COMMAND 0x5D
150 
151 //#define QUECTEL_WWAN_MULTI_PACKAGES
152 
153 #ifdef QUECTEL_WWAN_MULTI_PACKAGES
154 static uint __read_mostly rx_packets = 10;
155 module_param( rx_packets, uint, S_IRUGO | S_IWUSR );
156 
157 #define USB_CDC_SET_MULTI_PACKAGE_COMMAND (0x5C)
158 #define QUEC_NET_MSG_SPEC		(0x80)
159 #define QUEC_NET_MSG_ID_IP_DATA		(0x00)
160 
161 struct multi_package_config {
162 	__le32 enable;
163 	__le32 package_max_len;
164 	__le32 package_max_count_in_queue;
165 	__le32 timeout;
166 } __packed;
167 
168 struct quec_net_package_header {
169 	unsigned char msg_spec;
170 	unsigned char msg_id;
171 	unsigned short payload_len;
172 	unsigned char reserve[16];
173 } __packed;
174 #endif
175 
176 #ifdef QUECTEL_WWAN_QMAP
177 /*
178     Quectel_WCDMA&LTE_Linux_USB_Driver_User_Guide_V1.9.pdf
179     5.6.	Test QMAP on GobiNet or QMI WWAN
180     0 - no QMAP
181     1 - QMAP (Aggregation protocol)
182     X - QMAP (Multiplexing and Aggregation protocol)
183 */
184 static uint __read_mostly qmap_mode = 0;
185 module_param( qmap_mode, uint, S_IRUGO | S_IWUSR );
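/*
 * Example (an assumed usage sketch, not part of this file): loading the driver
 * with qmap_mode=4 makes qmap_register_device() below create four virtual
 * netdevs named "<real_dev>.1" .. "<real_dev>.4", one per QMAP mux ID starting
 * at QUECTEL_QMAP_MUX_ID.
 */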
186 
187 struct qmap_hdr {
188     u8 cd_rsvd_pad;
189     u8 mux_id;
190     u16 pkt_len;
191 } __packed;
192 
193 enum rmnet_map_v5_header_type {
194 	RMNET_MAP_HEADER_TYPE_UNKNOWN,
195 	RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
196 	RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
197 	RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
198 };
199 
200 /* Main QMAP header */
201 struct rmnet_map_header {
202 #if defined(__LITTLE_ENDIAN_BITFIELD)
203 	u8  pad_len:6;
204 	u8  next_hdr:1;
205 	u8  cd_bit:1;
206 #elif defined (__BIG_ENDIAN_BITFIELD)
207 	u8  cd_bit:1;
208 	u8  next_hdr:1;
209 	u8  pad_len:6;
210 #else
211 #error	"Please fix <asm/byteorder.h>"
212 #endif
213 	u8  mux_id;
214 	__be16 pkt_len;
215 }  __aligned(1);
216 
217 /* QMAP v5 headers */
218 struct rmnet_map_v5_csum_header {
219 #if defined(__LITTLE_ENDIAN_BITFIELD)
220 	u8  next_hdr:1;
221 	u8  header_type:7;
222 	u8  hw_reserved:7;
223 	u8  csum_valid_required:1;
224 #elif defined (__BIG_ENDIAN_BITFIELD)
225 	u8  header_type:7;
226 	u8  next_hdr:1;
227 	u8  csum_valid_required:1;
228 	u8  hw_reserved:7;
229 #else
230 #error	"Please fix <asm/byteorder.h>"
231 #endif
232 	__be16 reserved;
233 } __aligned(1);
234 
235 struct qmap_priv {
236 	struct net_device *real_dev;
237 	struct net_device *self_dev;
238 	uint qmap_version;
239 	uint offset_id;
240 	uint mux_id;
241 	uint link_state;
242 
243 #if defined(QUECTEL_UL_DATA_AGG)
244 	/* QMIWDS_ADMIN_SET_DATA_FORMAT_RESP TLV_0x17 and TLV_0x18 */
245 	uint ul_data_aggregation_max_datagrams; //UplinkDataAggregationMaxDatagramsTlv
246 	uint ul_data_aggregation_max_size; //UplinkDataAggregationMaxSizeTlv
247 	uint dl_minimum_padding; //0x1A
248 
249 	spinlock_t agg_lock;
250 	struct sk_buff *agg_skb;
251 	unsigned agg_count;
252 	struct timespec64 agg_time;
253 	struct hrtimer agg_hrtimer;
254 	struct work_struct agg_wq;
255 #endif
256 
257 #ifdef QUECTEL_BRIDGE_MODE
258 	int m_bridge_mode;
259 	uint m_bridge_ipv4;
260 	unsigned char mHostMAC[6];
261 #endif
262 };
263 
264 #ifdef QUECTEL_BRIDGE_MODE
265 static int is_qmap_netdev(const struct net_device *netdev);
266 #endif
267 
268 #endif
269 
270 #ifdef QUECTEL_BRIDGE_MODE
271 static int __read_mostly bridge_mode = 0/*|BIT(1)*/;
272 module_param( bridge_mode, int, S_IRUGO | S_IWUSR );
273 
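/* Bridge mode: answer ARP requests from the host locally (with the fixed modem
 * MAC ec20_mac) instead of forwarding them to the modem, which only speaks raw
 * IP. Returns 1 if the frame was an ARP request handled here. */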
274 static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4) {
275     struct arphdr *parp;
276     u8 *arpptr, *sha;
277     u8  sip[4], tip[4], ipv4[4];
278     struct sk_buff *reply = NULL;
279 
280     ipv4[0]  = (bridge_ipv4 >> 24) & 0xFF;
281     ipv4[1]  = (bridge_ipv4 >> 16) & 0xFF;
282     ipv4[2]  = (bridge_ipv4 >> 8) & 0xFF;
283     ipv4[3]  = (bridge_ipv4 >> 0) & 0xFF;
284 
285     parp = arp_hdr(skb);
286 
287     if (parp->ar_hrd == htons(ARPHRD_ETHER)  && parp->ar_pro == htons(ETH_P_IP)
288         && parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) {
289         arpptr = (u8 *)parp + sizeof(struct arphdr);
290         sha = arpptr;
291         arpptr += net->addr_len;	/* sha */
292         memcpy(sip, arpptr, sizeof(sip));
293         arpptr += sizeof(sip);
294         arpptr += net->addr_len;	/* tha */
295         memcpy(tip, arpptr, sizeof(tip));
296 
297         pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, ipv4=%d.%d.%d.%d\n", netdev_name(net),
298             sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]);
299 	//wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255
300         if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3])
301             reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, ec20_mac, sha);
302 
303         if (reply) {
304             skb_reset_mac_header(reply);
305             __skb_pull(reply, skb_network_offset(reply));
306             reply->ip_summed = CHECKSUM_UNNECESSARY;
307             reply->pkt_type = PACKET_HOST;
308 
309             netif_rx_ni(reply);
310         }
311         return 1;
312     }
313 
314     return 0;
315 }
316 
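/* Bridge mode TX path: handle ARP locally, learn the host MAC from DHCP
 * broadcasts, and drop frames whose source MAC is not the bridged host. */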
317 static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4, unsigned char *bridge_mac) {
318 	struct ethhdr *ehdr;
319 	const struct iphdr *iph;
320 
321 	skb_reset_mac_header(skb);
322 	ehdr = eth_hdr(skb);
323 
324 	if (ehdr->h_proto == htons(ETH_P_ARP)) {
325 		if (bridge_ipv4)
326 			bridge_arp_reply(net, skb, bridge_ipv4);
327 		return NULL;
328 	}
329 
330 	iph = ip_hdr(skb);
331 	//DBG("iphdr: ");
332 	//PrintHex((void *)iph, sizeof(struct iphdr));
333 
334 // 1	0.000000000	0.0.0.0	255.255.255.255	DHCP	362	DHCP Request  - Transaction ID 0xe7643ad7
335 	if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr == 0x00000000 && iph->daddr == 0xFFFFFFFF) {
336 		//if (udp_hdr(skb)->dest == htons(67)) //DHCP Request
337 		{
338 			memcpy(bridge_mac, ehdr->h_source, ETH_ALEN);
339 			pr_info("%s PC Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net),
340 				bridge_mac[0], bridge_mac[1], bridge_mac[2], bridge_mac[3], bridge_mac[4], bridge_mac[5]);
341 		}
342 	}
343 
344 	if (memcmp(ehdr->h_source, bridge_mac, ETH_ALEN)) {
345 		return NULL;
346 	}
347 
348 	return skb;
349 }
350 
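/* Bridge mode RX path: rewrite the destination MAC to the learned host MAC
 * (or to the netdev's own address when bridge mode is off). */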
351 static void bridge_mode_rx_fixup(sGobiUSBNet *pQmapDev, struct net_device *net, struct sk_buff *skb) {
352 	uint bridge_mode = 0;
353 	unsigned char *bridge_mac;
354 
355 	if (pQmapDev->qmap_mode > 1) {
356 		struct qmap_priv *priv = netdev_priv(net);
357 		bridge_mode = priv->m_bridge_mode;
358 		bridge_mac = priv->mHostMAC;
359 	}
360 	else {
361 		bridge_mode = pQmapDev->m_bridge_mode;
362 		bridge_mac = pQmapDev->mHostMAC;
363 	}
364 
365 	if (bridge_mode)
366 		memcpy(eth_hdr(skb)->h_dest, bridge_mac, ETH_ALEN);
367 	else
368 		memcpy(eth_hdr(skb)->h_dest, net->dev_addr, ETH_ALEN);
369 }
370 
371 static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) {
372     struct net_device *pNet = to_net_dev(dev);
373     uint bridge_mode = 0;
374 
375 	if (is_qmap_netdev(pNet)) {
376 		struct qmap_priv *priv = netdev_priv(pNet);
377 		bridge_mode = priv->m_bridge_mode;
378 	}
379 	else {
380         struct usbnet * pDev = netdev_priv( pNet );
381 		sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
382         bridge_mode = pGobiDev->m_bridge_mode;
383 	}
384 
385     return snprintf(buf, PAGE_SIZE, "%d\n", bridge_mode);
386 }
387 
388 static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
389 	struct net_device *pNet = to_net_dev(dev);
390 	uint old_mode = 0;
391 	uint bridge_mode = simple_strtoul(buf, NULL, 0);
392 
393 	if (pNet->type != ARPHRD_ETHER) {
394 		return count;
395 	}
396 
397 	if (is_qmap_netdev(pNet)) {
398 		struct qmap_priv *priv = netdev_priv(pNet);
399 
400 		old_mode = priv->m_bridge_mode;
401 		priv->m_bridge_mode = bridge_mode;
402 	}
403 	else {
404 		struct usbnet * pDev = netdev_priv( pNet );
405 		sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
406 
407 		old_mode = pGobiDev->m_bridge_mode;
408 		pGobiDev->m_bridge_mode = bridge_mode;
409 	}
410 
411 	if (old_mode != bridge_mode)
412 		dev_info(dev, "bridge_mode change to 0x%x\n", bridge_mode);
413 
414 	return count;
415 }
416 
417 static ssize_t bridge_ipv4_show(struct device *dev, struct device_attribute *attr, char *buf) {
418     struct net_device *pNet = to_net_dev(dev);
419     unsigned int bridge_ipv4 = 0;
420     unsigned char ipv4[4];
421 
422 	if (is_qmap_netdev(pNet)) {
423 		struct qmap_priv *priv = netdev_priv(pNet);
424 		bridge_ipv4 = priv->m_bridge_ipv4;
425 	}
426 	else {
427 		struct usbnet * pDev = netdev_priv( pNet );
428         sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
429         bridge_ipv4 = pGobiDev->m_bridge_ipv4;
430 	}
431 
432 	ipv4[0]  = (bridge_ipv4 >> 24) & 0xFF;
433 	ipv4[1]  = (bridge_ipv4 >> 16) & 0xFF;
434 	ipv4[2]  = (bridge_ipv4 >> 8) & 0xFF;
435 	ipv4[3]  = (bridge_ipv4 >> 0) & 0xFF;
436 
437 	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",  ipv4[0], ipv4[1], ipv4[2], ipv4[3]);
438 }
439 
440 static ssize_t bridge_ipv4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
441     struct net_device *pNet = to_net_dev(dev);
442 
443 	if (is_qmap_netdev(pNet)) {
444 		struct qmap_priv *priv = netdev_priv(pNet);
445 		priv->m_bridge_ipv4 = simple_strtoul(buf, NULL, 16);
446 	}
447 	else {
448         struct usbnet * pDev = netdev_priv( pNet );
449         sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
450 		pGobiDev->m_bridge_ipv4 = simple_strtoul(buf, NULL, 16);
451 	}
452 
453     return count;
454 }
455 
456 static DEVICE_ATTR(bridge_mode, S_IWUSR | S_IRUGO, bridge_mode_show, bridge_mode_store);
457 static DEVICE_ATTR(bridge_ipv4, S_IWUSR | S_IRUGO, bridge_ipv4_show, bridge_ipv4_store);
458 
459 static struct attribute *qmi_qmap_sysfs_attrs[] = {
460 	&dev_attr_bridge_mode.attr,
461 	&dev_attr_bridge_ipv4.attr,
462 	NULL,
463 };
464 
465 static struct attribute_group qmi_qmap_sysfs_attr_group = {
466 	.attrs = qmi_qmap_sysfs_attrs,
467 };
468 #endif
469 
470 #ifdef QUECTEL_WWAN_QMAP
471 static sGobiUSBNet * net_to_qmap(struct net_device *dev) {
472 	struct usbnet *usbnet = netdev_priv(dev);
473 	sGobiUSBNet * pGobiDev = (sGobiUSBNet *)usbnet->data[0];
474 
475 	return pGobiDev;
476 }
477 
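/* Prepend a QMAP (v1) header: pad the payload to a 4-byte boundary, then add
 * the mux ID and the big-endian payload length. */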
478 static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) {
479 	struct qmap_hdr *qhdr;
480 	int pad = 0;
481 
482 	pad = skb->len%4;
483 	if (pad) {
484 		pad = 4 - pad;
485 		if (skb_tailroom(skb) < pad) {
486 			printk("skb_tailroom small!\n");
487 			pad = 0;
488 		}
489 		if (pad)
490 			__skb_put(skb, pad);
491 	}
492 
493 	qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr));
494 	qhdr->cd_rsvd_pad = pad;
495 	qhdr->mux_id = mux_id;
496 	qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr));
497 
498 	return skb;
499 }
500 
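/* Prepend a QMAP header plus a v5 checksum-offload header (next_hdr = 1);
 * the checksum-offload request itself is left disabled (see the TODO below). */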
501 static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) {
502 	struct rmnet_map_header *map_header;
503 	struct rmnet_map_v5_csum_header *ul_header;
504 	u32 padding, map_datalen;
505 
506 	map_datalen = skb->len;
507 	padding = map_datalen%4;
508 	if (padding) {
509 		padding = 4 - padding;
510 		if (skb_tailroom(skb) < padding) {
511 			printk("skb_tailroom small!\n");
512 			padding = 0;
513 		}
514 		if (padding)
515 			__skb_put(skb, padding);
516 	}
517 
518 	map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)));
519 	map_header->cd_bit = 0;
520 	map_header->next_hdr = 1;
521 	map_header->pad_len = padding;
522 	map_header->mux_id = mux_id;
523 	map_header->pkt_len = htons(map_datalen + padding);
524 
525 	ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
526 	memset(ul_header, 0, sizeof(*ul_header));
527 	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
528 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
529 #if 0 //TODO
530 		skb->ip_summed = CHECKSUM_NONE;
531 		/* Ask for checksum offloading */
532 		ul_header->csum_valid_required = 1;
533 #endif
534 	}
535 
536 	return skb;
537 }
538 
539 static void rmnet_usb_tx_wake_queue(unsigned long data) {
540 	sGobiUSBNet *pQmapDev = (void *)data;
541 	int i;
542 
543 	for (i = 0; i < pQmapDev->qmap_mode; i++) {
544 		struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
545 		if (qmap_net) {
546 			if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(pQmapDev->mpNetDev->net)) {
547 				netif_wake_queue(qmap_net);
548 			}
549 		}
550 	}
551 }
552 
553 static void rmnet_usb_tx_skb_destructor(struct sk_buff *skb) {
554 	sGobiUSBNet *pQmapDev = net_to_qmap(skb->dev);
555 	int i;
556 
557 	for (i = 0; i < pQmapDev->qmap_mode; i++) {
558 		struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
559 
560 		if (qmap_net) {
561 			if (netif_queue_stopped(qmap_net)) {
562 				tasklet_schedule(&pQmapDev->txq);
563 				break;
564 			}
565 		}
566 	}
567 }
568 
569 static void rmnet_vnd_update_rx_stats(struct net_device *net,
570 			unsigned rx_packets, unsigned rx_bytes) {
571 	net->stats.rx_packets += rx_packets;
572 	net->stats.rx_bytes += rx_bytes;
573 }
574 
575 static void rmnet_vnd_update_tx_stats(struct net_device *net,
576 			unsigned tx_packets, unsigned tx_bytes) {
577 	net->stats.tx_packets += tx_packets;
578 	net->stats.tx_bytes += tx_bytes;
579 }
580 
581 #if defined(QUECTEL_UL_DATA_AGG)
582 static long agg_time_limit __read_mostly = 1000000L; //reducing this time can improve throughput, but increases the number of USB interrupts
583 module_param(agg_time_limit, long, S_IRUGO | S_IWUSR);
584 MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf");
585 
586 static long agg_bypass_time __read_mostly = 10000000L;
587 module_param(agg_bypass_time, long, S_IRUGO | S_IWUSR);
588 MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");
589 
590 static int rmnet_usb_tx_agg_skip(struct sk_buff *skb, int offset)
591 {
592 	u8 *packet_start = skb->data + offset;
593 	int ready2send = 0;
594 
595 	if (skb->protocol == htons(ETH_P_IP)) {
596 		struct iphdr *ip4h = (struct iphdr *)(packet_start);
597 
598 		if (ip4h->protocol == IPPROTO_TCP) {
599 			const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct iphdr));
600 			if (th->psh) {
601 				ready2send = 1;
602 			}
603 		}
604 		else if (ip4h->protocol == IPPROTO_ICMP)
605 			ready2send = 1;
606 
607 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
608 		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
609 
610 		if (ip6h->nexthdr == NEXTHDR_TCP) {
611 			const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct ipv6hdr));
612 			if (th->psh) {
613 				ready2send = 1;
614 			}
615 		} else if (ip6h->nexthdr == NEXTHDR_ICMP) {
616 			ready2send = 1;
617 		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
618 			struct frag_hdr *frag;
619 
620 			frag = (struct frag_hdr *)(packet_start
621 						   + sizeof(struct ipv6hdr));
622 			if (frag->nexthdr == IPPROTO_ICMPV6)
623 				ready2send = 1;
624 		}
625 	}
626 
627 	return ready2send;
628 }
629 
630 static void rmnet_usb_tx_agg_work(struct work_struct *work)
631 {
632 	struct qmap_priv *priv =
633 			container_of(work, struct qmap_priv, agg_wq);
634 	struct sk_buff *skb = NULL;
635 	unsigned long flags;
636 
637 	spin_lock_irqsave(&priv->agg_lock, flags);
638 	if (likely(priv->agg_skb)) {
639 		skb = priv->agg_skb;
640 		priv->agg_skb = NULL;
641 		priv->agg_count = 0;
642 		skb->protocol = htons(ETH_P_MAP);
643 		skb->dev = priv->real_dev;
644 		ktime_get_ts64(&priv->agg_time);
645 	}
646 	spin_unlock_irqrestore(&priv->agg_lock, flags);
647 
648 	if (skb) {
649 		int err = dev_queue_xmit(skb);
650 		if (err != NET_XMIT_SUCCESS) {
651 			priv->self_dev->stats.tx_errors++;
652 		}
653 	}
654 }
655 
656 static enum hrtimer_restart rmnet_usb_tx_agg_timer_cb(struct hrtimer *timer)
657 {
658 	struct qmap_priv *priv =
659 			container_of(timer, struct qmap_priv, agg_hrtimer);
660 
661 	schedule_work(&priv->agg_wq);
662 	return HRTIMER_NORESTART;
663 }
664 
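/* Uplink data aggregation: copy QMAP frames into priv->agg_skb until the
 * size/count limits or agg_time_limit are reached, then hand the aggregate to
 * the real USB netdev; a 2 ms hrtimer flushes any partially filled aggregate. */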
665 static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) {
666 	int ready2send = 0;
667 	int xmit_more = 0;
668 	struct timespec64 diff, now;
669 	struct sk_buff *agg_skb = NULL;
670 	unsigned long flags;
671 	int err;
672 	struct net_device *pNet = priv->self_dev;
673 
674 #if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0) //6b16f9ee89b8d5709f24bc3ac89ae8b5452c0d7c
675 #if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0)
676 	xmit_more = skb->xmit_more;
677 #endif
678 #else
679 	xmit_more = netdev_xmit_more();
680 #endif
681 
682 	rmnet_vnd_update_tx_stats(pNet, 1, skb->len);
683 
684 	if (priv->ul_data_aggregation_max_datagrams == 1) {
685 		skb->protocol = htons(ETH_P_MAP);
686 		skb->dev = priv->real_dev;
687 		if (!skb->destructor)
688 			skb->destructor = rmnet_usb_tx_skb_destructor;
689 		err = dev_queue_xmit(skb);
690 		if (err != NET_XMIT_SUCCESS)
691 			pNet->stats.tx_errors++;
692 		return NET_XMIT_SUCCESS;
693 	}
694 
695 new_packet:
696 	spin_lock_irqsave(&priv->agg_lock, flags);
697 	agg_skb = NULL;
698 	ready2send = 0;
699 	ktime_get_ts64(&now);
700 	diff = timespec64_sub(now, priv->agg_time);
701 
702 	if (priv->agg_skb) {
703 		if ((priv->agg_skb->len + skb->len) < priv->ul_data_aggregation_max_size) {
704 			memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
705 			priv->agg_count++;
706 
707 			if (diff.tv_sec > 0 || diff.tv_nsec > agg_time_limit) {
708 				ready2send = 1;
709 			}
710 			else if (priv->agg_count == priv->ul_data_aggregation_max_datagrams) {
711 				ready2send = 1;
712 			}
713 			else if (xmit_more == 0) {
714 				struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
715 				size_t offset = sizeof(struct rmnet_map_header);
716 				if (map_header->next_hdr)
717 					offset += sizeof(struct rmnet_map_v5_csum_header);
718 
719 				ready2send = rmnet_usb_tx_agg_skip(skb, offset);
720 			}
721 
722 			dev_kfree_skb_any(skb);
723 			skb = NULL;
724 		}
725 		else {
726 			ready2send = 1;
727 		}
728 
729 		if (ready2send) {
730 			agg_skb = priv->agg_skb;
731 			priv->agg_skb = NULL;
732 			priv->agg_count = 0;
733 		}
734 	}
735 	else if (skb) {
736 		if (diff.tv_sec > 0 || diff.tv_nsec > agg_bypass_time) {
737 			ready2send = 1;
738 		}
739 		else if (xmit_more == 0) {
740 			struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
741 			size_t offset = sizeof(struct rmnet_map_header);
742 			if (map_header->next_hdr)
743 				offset += sizeof(struct rmnet_map_v5_csum_header);
744 
745 			ready2send = rmnet_usb_tx_agg_skip(skb, offset);
746 		}
747 
748 		if (ready2send == 0) {
749 			priv->agg_skb = alloc_skb(priv->ul_data_aggregation_max_size, GFP_ATOMIC);
750 			if (priv->agg_skb) {
751 				memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
752 				priv->agg_count++;
753 				dev_kfree_skb_any(skb);
754 				skb = NULL;
755 			}
756 			else {
757 				ready2send = 1;
758 			}
759 		}
760 
761 		if (ready2send) {
762 			agg_skb = skb;
763 			skb = NULL;
764 		}
765 	}
766 
767 	if (ready2send) {
768 		priv->agg_time = now;
769 	}
770 	spin_unlock_irqrestore(&priv->agg_lock, flags);
771 
772 	if (agg_skb) {
773 		agg_skb->protocol = htons(ETH_P_MAP);
774 		agg_skb->dev = priv->real_dev;
775 		if (!agg_skb->destructor)
776 			agg_skb->destructor = rmnet_usb_tx_skb_destructor;
777 		err = dev_queue_xmit(agg_skb);
778 		if (err != NET_XMIT_SUCCESS) {
779 			pNet->stats.tx_errors++;
780 		}
781 	}
782 
783 	if (skb) {
784 		goto new_packet;
785 	}
786 
787 	if (priv->agg_skb) {
788 		if (!hrtimer_is_queued(&priv->agg_hrtimer))
789 			hrtimer_start(&priv->agg_hrtimer, ns_to_ktime(NSEC_PER_MSEC * 2), HRTIMER_MODE_REL);
790 	}
791 
792 	return NET_XMIT_SUCCESS;
793 }
794 #endif
795 
796 static int qmap_open(struct net_device *dev)
797 {
798 	struct qmap_priv *priv = netdev_priv(dev);
799 	sGobiUSBNet * pGobiDev = net_to_qmap(priv->real_dev);
800 
801 	if (!(priv->real_dev->flags & IFF_UP))
802 		return -ENETDOWN;
803 
804 	if (!pGobiDev->mbQMIReady)
805 		return -ENETDOWN;
806 
807 #if defined(QUECTEL_UL_DATA_AGG)
808 	if (priv->ul_data_aggregation_max_datagrams == 1 && pGobiDev->agg_ctx.ul_data_aggregation_max_datagrams > 1) {
809 		priv->ul_data_aggregation_max_datagrams = pGobiDev->agg_ctx.ul_data_aggregation_max_datagrams;
810 		priv->ul_data_aggregation_max_size = pGobiDev->agg_ctx.ul_data_aggregation_max_size;
811 		priv->dl_minimum_padding = pGobiDev->agg_ctx.dl_minimum_padding;
812 	}
813 #endif
814 
815 	if (netif_carrier_ok(priv->real_dev) && priv->link_state)
816 		netif_carrier_on(dev);
817 
818 	if (netif_carrier_ok(dev)) {
819 		if (netif_queue_stopped(dev) && !netif_queue_stopped(priv->real_dev))
820 			netif_wake_queue(dev);
821 	}
822 
823 	return 0;
824 }
825 
826 static int qmap_stop(struct net_device *pNet)
827 {
828 	netif_carrier_off(pNet);
829 	return 0;
830 }
831 
832 static int qmap_start_xmit(struct sk_buff *skb, struct net_device *pNet)
833 {
834 	int err;
835 	struct qmap_priv *priv = netdev_priv(pNet);
836 
837 	if (netif_queue_stopped(priv->real_dev)) {
838 		//printk(KERN_DEBUG "s\n");
839 		netif_stop_queue(pNet);
840 		return NETDEV_TX_BUSY;
841 	}
842 
843 	if (pNet->type == ARPHRD_ETHER) {
844 #ifdef QUECTEL_BRIDGE_MODE
845 		if (priv->m_bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->m_bridge_ipv4, priv->mHostMAC) == NULL) {
846 			dev_kfree_skb_any (skb);
847 			return NETDEV_TX_OK;
848 		}
849 #endif
850 
851 		if (ether_to_ip_fixup(pNet, skb) == NULL) {
852 			dev_kfree_skb_any (skb);
853 			return NETDEV_TX_OK;
854 		}
855 	}
856 
857 	if (priv->qmap_version == 5) {
858 		add_qhdr(skb, priv->mux_id);
859 	}
860 	else if (priv->qmap_version == 9) {
861 		add_qhdr_v5(skb, priv->mux_id);
862 	}
863 	else {
864 		dev_kfree_skb_any (skb);
865 		return NETDEV_TX_OK;
866 	}
867 
868 #if defined(QUECTEL_UL_DATA_AGG)
869 	err = rmnet_usb_tx_agg(skb, priv);
870 #else
871 	skb->protocol = htons(ETH_P_MAP);
872 	skb->dev = priv->real_dev;
873 	if (!skb->destructor)
874 		skb->destructor = rmnet_usb_tx_skb_destructor;
875 	err = dev_queue_xmit(skb);
876 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 ))
877 	if (err == NET_XMIT_SUCCESS) {
878 		rmnet_vnd_update_tx_stats(pNet, 1, skb->len);
879 	} else {
880 		pNet->stats.tx_errors++;
881 	}
882 #endif
883 #endif
884 
885 	return err;
886 }
887 
888 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
889 #else
890 static const struct net_device_ops qmap_netdev_ops = {
891 	.ndo_open       = qmap_open,
892 	.ndo_stop       = qmap_stop,
893 	.ndo_start_xmit = qmap_start_xmit,
894 };
895 #endif
896 
897 #ifdef QUECTEL_BRIDGE_MODE
898 static int is_qmap_netdev(const struct net_device *netdev) {
899 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
900     return netdev->open == qmap_open;
901 #else
902     return netdev->netdev_ops == &qmap_netdev_ops;
903 #endif
904 }
905 #endif
906 
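/* Allocate and register one virtual Ethernet netdev ("<real_dev>.N") per QMAP
 * mux channel and wire it to the qmap_netdev_ops above. */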
907 static int qmap_register_device(sGobiUSBNet * pDev, u8 offset_id)
908 {
909 	struct net_device *real_dev = pDev->mpNetDev->net;
910 	struct net_device *qmap_net;
911 	struct qmap_priv *priv;
912 	int err;
913 
914 	qmap_net = alloc_etherdev(sizeof(*priv));
915 	if (!qmap_net)
916 		return -ENOBUFS;
917 
918 	SET_NETDEV_DEV(qmap_net, &real_dev->dev);
919 	priv = netdev_priv(qmap_net);
920 	priv->offset_id = offset_id;
921 	priv->mux_id = QUECTEL_QMAP_MUX_ID + offset_id;
922 	priv->qmap_version = pDev->qmap_version;
923 	priv->real_dev = real_dev;
924 	priv->self_dev = qmap_net;
925 
926 #if defined(QUECTEL_UL_DATA_AGG)
927 	priv->ul_data_aggregation_max_datagrams = 1;
928 	priv->ul_data_aggregation_max_size = 2048;
929 	priv->dl_minimum_padding = 0;
930 	priv->agg_skb = NULL;
931 	priv->agg_count = 0;
932 	hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
933 	priv->agg_hrtimer.function = rmnet_usb_tx_agg_timer_cb;
934 	INIT_WORK(&priv->agg_wq, rmnet_usb_tx_agg_work);
935 	ktime_get_ts64(&priv->agg_time);
936 	spin_lock_init(&priv->agg_lock);
937 #endif
938 
939     sprintf(qmap_net->name, "%s.%d", real_dev->name, offset_id + 1);
940 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
941     qmap_net->open = qmap_open;
942     qmap_net->stop = qmap_stop;
943     qmap_net->hard_start_xmit = qmap_start_xmit;
944 #else
945     qmap_net->netdev_ops = &qmap_netdev_ops;
946 #endif
947     memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN);
948 
949 #ifdef QUECTEL_BRIDGE_MODE
950 	priv->m_bridge_mode = !!(pDev->m_bridge_mode & BIT(offset_id));
951 	qmap_net->sysfs_groups[0] = &qmi_qmap_sysfs_attr_group;
952 #endif
953 
954     err = register_netdev(qmap_net);
955     if (err < 0) {
956         INFO("register_netdev(%s), err=%d\n", qmap_net->name, err);
957         goto out_free_newdev;
958     }
959     netif_device_attach (qmap_net);
960 
961     pDev->mpQmapNetDev[offset_id] = qmap_net;
962     qmap_net->flags |= IFF_NOARP;
963     qmap_net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
964 
965     INFO("%s\n", qmap_net->name);
966 
967     return 0;
968 
969 out_free_newdev:
970     free_netdev(qmap_net);
971     return err;
972 }
973 
974 static void qmap_unregister_device(sGobiUSBNet * pDev, u8 offset_id) {
975 	struct net_device *qmap_net;
976 #if defined(QUECTEL_UL_DATA_AGG)
977 	struct qmap_priv *priv;
978 	unsigned long flags;
979 #endif
980 
981 	qmap_net = pDev->mpQmapNetDev[offset_id];
982 	if (qmap_net == NULL)
983 		return;
984 
985 	netif_carrier_off(qmap_net);
986 	netif_stop_queue(qmap_net);
987 
988 #if defined(QUECTEL_UL_DATA_AGG)
989 	priv = netdev_priv(qmap_net);
990 	hrtimer_cancel(&priv->agg_hrtimer);
991 	cancel_work_sync(&priv->agg_wq);
992 	spin_lock_irqsave(&priv->agg_lock, flags);
993 	if (priv->agg_skb) {
994 		kfree_skb(priv->agg_skb);
995 	}
996 	spin_unlock_irqrestore(&priv->agg_lock, flags);
997 #endif
998 
999 	unregister_netdev(qmap_net);
1000 	free_netdev(qmap_net);
1001 }
1002 
1003 static ssize_t qmap_mode_show(struct device *dev, struct device_attribute *attr, char *buf) {
1004     struct net_device *pNet = to_net_dev(dev);
1005     struct usbnet * pDev = netdev_priv( pNet );
1006     sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
1007 
1008     return snprintf(buf, PAGE_SIZE, "%d\n", pGobiDev->qmap_mode);
1009 }
1010 
1011 static DEVICE_ATTR(qmap_mode, S_IRUGO, qmap_mode_show, NULL);
1012 
1013 static ssize_t qmap_size_show(struct device *dev, struct device_attribute *attr, char *buf) {
1014     struct net_device *pNet = to_net_dev(dev);
1015     struct usbnet * pDev = netdev_priv( pNet );
1016     sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
1017 
1018     return snprintf(buf, PAGE_SIZE, "%d\n", pGobiDev->qmap_size);
1019 }
1020 
1021 static DEVICE_ATTR(qmap_size, S_IRUGO, qmap_size_show, NULL);
1022 
1023 static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) {
1024 	sGobiUSBNet *pQmapDev = net_to_qmap(to_net_dev(dev));
1025 
1026 	return snprintf(buf, PAGE_SIZE, "0x%x\n",  pQmapDev->link_state);
1027 }
1028 
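/* sysfs "link_state" store: for qmap_mode > 1 the written value encodes
 * (mux offset + 1), with bit 7 set to bring that mux down; the carrier state
 * of the base and per-mux netdevs is updated to match. */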
1029 static ssize_t link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
1030 	struct net_device *netdev = to_net_dev(dev);
1031 	sGobiUSBNet *pQmapDev = net_to_qmap(to_net_dev(dev));
1032 	unsigned qmap_mode = pQmapDev->qmap_mode;
1033 	unsigned link_state = 0;
1034 	unsigned old_link = pQmapDev->link_state;
1035 	uint offset_id = 0;
1036 
1037 	link_state = simple_strtoul(buf, NULL, 0);
1038 	if (qmap_mode == 1) {
1039 		pQmapDev->link_state = !!link_state;
1040 	}
1041 	else if (qmap_mode > 1) {
1042 		offset_id = ((link_state&0x7F) - 1);
1043 
1044 		if (offset_id >= qmap_mode) {
1045 			dev_info(dev, "%s offset_id is %d. but qmap_mode is %d\n", __func__, offset_id, pQmapDev->qmap_mode);
1046 			return count;
1047 		}
1048 
1049 		if (link_state&0x80)
1050 			pQmapDev->link_state &= ~(1 << offset_id);
1051 		else
1052 			pQmapDev->link_state |= (1 << offset_id);
1053 	}
1054 
1055 	if (old_link != pQmapDev->link_state) {
1056 		struct net_device *qmap_net = pQmapDev->mpQmapNetDev[offset_id];
1057 
1058 		if (pQmapDev->link_state) {
1059 			netif_carrier_on(netdev);
1060 		} else {
1061 			netif_carrier_off(netdev);
1062 		}
1063 
1064 		if (qmap_net && qmap_net != netdev) {
1065 			struct qmap_priv *priv = netdev_priv(qmap_net);
1066 
1067 			priv->link_state = !!(pQmapDev->link_state & (1 << offset_id));
1068 			if (priv->link_state) {
1069 				netif_carrier_on(qmap_net);
1070 				if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(priv->real_dev))
1071 					netif_wake_queue(qmap_net);
1072 			}
1073 			else
1074 				netif_carrier_off(qmap_net);
1075 		}
1076 	}
1077 
1078 	if (old_link != pQmapDev->link_state)
1079 		dev_info(dev, "link_state 0x%x -> 0x%x\n", old_link, pQmapDev->link_state);
1080 
1081 	return count;
1082 }
1083 
1084 static DEVICE_ATTR(link_state, S_IWUSR | S_IRUGO, link_state_show, link_state_store);
1085 #endif
1086 
1087 static struct attribute *gobinet_sysfs_attrs[] = {
1088 #ifdef QUECTEL_BRIDGE_MODE
1089 	&dev_attr_bridge_mode.attr,
1090 	&dev_attr_bridge_ipv4.attr,
1091 #endif
1092 #ifdef QUECTEL_WWAN_QMAP
1093 	&dev_attr_qmap_mode.attr,
1094 	&dev_attr_qmap_size.attr,
1095 	&dev_attr_link_state.attr,
1096 #endif
1097 	NULL,
1098 };
1099 
1100 static struct attribute_group gobinet_sysfs_attr_group = {
1101 	.attrs = gobinet_sysfs_attrs,
1102 };
1103 
1104 #if defined(QUECTEL_WWAN_QMAP)
1105 typedef struct {
1106     unsigned int size;
1107     unsigned int rx_urb_size;
1108     unsigned int ep_type;
1109     unsigned int iface_id;
1110     unsigned int qmap_mode;
1111     unsigned int qmap_version;
1112     unsigned int dl_minimum_padding;
1113     char ifname[8][16];
1114     unsigned char mux_id[8];
1115 } RMNET_INFO;
1116 
1117 static void rmnet_info_set(struct sGobiUSBNet *pQmapDev, RMNET_INFO *rmnet_info)
1118 {
1119 	int i;
1120 
1121 	memset(rmnet_info, 0, sizeof(*rmnet_info));
1122 	rmnet_info->size = sizeof(RMNET_INFO);
1123 	rmnet_info->rx_urb_size = pQmapDev->qmap_size;
1124 	rmnet_info->ep_type = 2; //DATA_EP_TYPE_HSUSB
1125 	rmnet_info->iface_id = 4;
1126 	rmnet_info->qmap_mode = pQmapDev->qmap_mode;
1127 	rmnet_info->qmap_version = pQmapDev->qmap_version;
1128 	rmnet_info->dl_minimum_padding = 0;
1129 
1130 	for (i = 0; i < pQmapDev->qmap_mode; i++) {
1131 		struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
1132 
1133 		if (!qmap_net)
1134 			break;
1135 
1136 		strcpy(rmnet_info->ifname[i], qmap_net->name);
1137 		rmnet_info->mux_id[i] = QUECTEL_QMAP_MUX_ID;
1138 		if (pQmapDev->qmap_mode > 1) {
1139 			struct qmap_priv *priv = netdev_priv(qmap_net);
1140 
1141 			rmnet_info->mux_id[i] = priv->mux_id;
1142 		}
1143 	}
1144 }
1145 
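/* Private ioctls used by Quectel user-space tools: 0x89F1 sets link_state
 * (same format as the sysfs attribute), 0x89F2 is a no-op probe, and 0x89F3
 * returns the RMNET_INFO structure above describing the QMAP configuration. */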
1146 static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
1147 	int rc = -EOPNOTSUPP;
1148 	uint link_state = 0;
1149 	sGobiUSBNet *pQmapDev = net_to_qmap(dev);
1150 
1151 	atomic_inc(&pQmapDev->refcount);
1152 	if (!pQmapDev->mbQMIReady) {
1153 		if (wait_for_completion_interruptible_timeout(&pQmapDev->mQMIReadyCompletion, 15*HZ) <= 0) {
1154 			if (atomic_dec_and_test(&pQmapDev->refcount)) {
1155 				kfree( pQmapDev );
1156 			}
1157 			return -ETIMEDOUT;
1158 		}
1159 	}
1160 	atomic_dec(&pQmapDev->refcount);
1161 
1162 	switch (cmd) {
1163 	case 0x89F1: //SIOCDEVPRIVATE
1164 		rc = copy_from_user(&link_state, ifr->ifr_ifru.ifru_data, sizeof(link_state));
1165 		if (!rc) {
1166 			char buf[32];
1167 			snprintf(buf, sizeof(buf), "%u", link_state);
1168 			link_state_store(&dev->dev, NULL, buf, strlen(buf));
1169 		}
1170 	break;
1171 
1172 	case 0x89F2: //SIOCDEVPRIVATE
1173 		rc = 0;
1174 	break;
1175 
1176 	case 0x89F3: //SIOCDEVPRIVATE
1177 		if (pQmapDev->qmap_mode) {
1178 			RMNET_INFO rmnet_info;
1179 
1180 			rmnet_info_set(pQmapDev, &rmnet_info);
1181 			rc = copy_to_user(ifr->ifr_ifru.ifru_data, &rmnet_info, sizeof(rmnet_info));
1182 		}
1183 	break;
1184 
1185 	default:
1186 	break;
1187 	}
1188 
1189 	return rc;
1190 }
1191 #endif
1192 
1193 #ifdef CONFIG_PM
1194 /*===========================================================================
1195 METHOD:
1196    GobiNetSuspend (Public Method)
1197 
1198 DESCRIPTION:
1199    Stops QMI traffic while device is suspended
1200 
1201 PARAMETERS
1202    pIntf          [ I ] - Pointer to interface
1203    powerEvent     [ I ] - Power management event
1204 
1205 RETURN VALUE:
1206    int - 0 for success
1207          negative errno for failure
1208 ===========================================================================*/
1209 static int GobiNetSuspend(
1210    struct usb_interface *     pIntf,
1211    pm_message_t               powerEvent )
1212 {
1213    struct usbnet * pDev;
1214    sGobiUSBNet * pGobiDev;
1215 
1216    if (pIntf == 0)
1217    {
1218       return -ENOMEM;
1219    }
1220 
1221 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
1222    pDev = usb_get_intfdata( pIntf );
1223 #else
1224    pDev = (struct usbnet *)pIntf->dev.platform_data;
1225 #endif
1226 
1227    if (pDev == NULL || pDev->net == NULL)
1228    {
1229       DBG( "failed to get netdevice\n" );
1230       return -ENXIO;
1231    }
1232 
1233    pGobiDev = (sGobiUSBNet *)pDev->data[0];
1234    if (pGobiDev == NULL)
1235    {
1236       DBG( "failed to get QMIDevice\n" );
1237       return -ENXIO;
1238    }
1239 
1240    if (pGobiDev->mbQMISyncIng)
1241    {
1242       DBG( "QMI sync ing\n" );
1243       return -EBUSY;
1244    }
1245 
1246    // Is this autosuspend or system suspend?
1247    //    do we allow remote wakeup?
1248 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
1249 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
1250    if (pDev->udev->auto_pm == 0)
1251 #else
1252    if (1)
1253 #endif
1254 #else
1255    if ((powerEvent.event & PM_EVENT_AUTO) == 0)
1256 #endif
1257    {
1258       DBG( "device suspended to power level %d\n",
1259            powerEvent.event );
1260       GobiSetDownReason( pGobiDev, DRIVER_SUSPENDED );
1261    }
1262    else
1263    {
1264       DBG( "device autosuspend\n" );
1265    }
1266 
1267    if (powerEvent.event & PM_EVENT_SUSPEND)
1268    {
1269       // Stop QMI read callbacks
1270    if (pGobiDev->m_qcrmcall_mode) {
1271    } else {
1272       KillRead( pGobiDev );
1273    }
1274 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 ))
1275       pDev->udev->reset_resume = 0;
1276 #endif
1277 
1278       // Store power state to avoid duplicate resumes
1279       pIntf->dev.power.power_state.event = powerEvent.event;
1280    }
1281    else
1282    {
1283       // Other power modes cause QMI connection to be lost
1284 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 ))
1285       pDev->udev->reset_resume = 1;
1286 #endif
1287    }
1288 
1289    // Run usbnet's suspend function
1290    return usbnet_suspend( pIntf, powerEvent );
1291 }
1292 int QuecGobiNetSuspend(struct usb_interface *pIntf, pm_message_t powerEvent) {
1293 	return GobiNetSuspend(pIntf, powerEvent);
1294 }
1295 
1296 /*===========================================================================
1297 METHOD:
1298    GobiNetResume (Public Method)
1299 
1300 DESCRIPTION:
1301    Resume QMI traffic or recreate QMI device
1302 
1303 PARAMETERS
1304    pIntf          [ I ] - Pointer to interface
1305 
1306 RETURN VALUE:
1307    int - 0 for success
1308          negative errno for failure
1309 ===========================================================================*/
1310 static int GobiNetResume( struct usb_interface * pIntf )
1311 {
1312    struct usbnet * pDev;
1313    sGobiUSBNet * pGobiDev;
1314    int nRet;
1315    int oldPowerState;
1316 
1317    if (pIntf == 0)
1318    {
1319       return -ENOMEM;
1320    }
1321 
1322 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
1323    pDev = usb_get_intfdata( pIntf );
1324 #else
1325    pDev = (struct usbnet *)pIntf->dev.platform_data;
1326 #endif
1327 
1328    if (pDev == NULL || pDev->net == NULL)
1329    {
1330       DBG( "failed to get netdevice\n" );
1331       return -ENXIO;
1332    }
1333 
1334    pGobiDev = (sGobiUSBNet *)pDev->data[0];
1335    if (pGobiDev == NULL)
1336    {
1337       DBG( "failed to get QMIDevice\n" );
1338       return -ENXIO;
1339    }
1340 
1341    oldPowerState = pIntf->dev.power.power_state.event;
1342    pIntf->dev.power.power_state.event = PM_EVENT_ON;
1343    DBG( "resuming from power mode %d\n", oldPowerState );
1344 
1345    if (oldPowerState & PM_EVENT_SUSPEND)
1346    {
1347       // It doesn't matter if this is autoresume or system resume
1348       GobiClearDownReason( pGobiDev, DRIVER_SUSPENDED );
1349 
1350       nRet = usbnet_resume( pIntf );
1351       if (nRet != 0)
1352       {
1353          DBG( "usbnet_resume error %d\n", nRet );
1354          return nRet;
1355       }
1356 
1357       // Restart QMI read callbacks
1358       if (pGobiDev->m_qcrmcall_mode) {
1359          nRet = 0;
1360       } else {
1361          nRet = StartRead( pGobiDev );
1362       }
1363       if (nRet != 0)
1364       {
1365          DBG( "StartRead error %d\n", nRet );
1366          return nRet;
1367       }
1368 
1369 #ifdef CONFIG_PM
1370    #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
1371       // Kick Auto PM thread to process any queued URBs
1372       complete( &pGobiDev->mAutoPM.mThreadDoWork );
1373     #endif
1374 #endif /* CONFIG_PM */
1375 
1376 #if defined(QUECTEL_WWAN_QMAP)
1377       if ((!netif_queue_stopped(pDev->net)) && (pGobiDev->qmap_mode > 1)) {
1378             rmnet_usb_tx_wake_queue((unsigned long )pGobiDev);
1379       }
1380 #endif
1381    }
1382    else
1383    {
1384       DBG( "nothing to resume\n" );
1385       return 0;
1386    }
1387 
1388    return nRet;
1389 }
1390 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,27 ))
1391 static int GobiNetResetResume( struct usb_interface * pIntf )
1392 {
1393    INFO( "device do not support reset_resume\n" );
1394    pIntf->needs_binding = 1;
1395 
1396    return -EOPNOTSUPP;
1397 }
1398 #endif
1399 #endif /* CONFIG_PM */
1400 
1401 static void ql_net_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
1402 {
1403 	usbnet_get_drvinfo(net, info);
1404 	/* Inherit standard device info */
1405 	strlcpy(info->driver, driver_name, sizeof(info->driver));
1406 	strlcpy(info->version, VERSION_NUMBER, sizeof(info->version));
1407 }
1408 
1409 static struct ethtool_ops ql_net_ethtool_ops;
1410 
1411 /*===========================================================================
1412 METHOD:
1413    GobiNetDriverBind (Public Method)
1414 
1415 DESCRIPTION:
1416    Setup in and out pipes
1417 
1418 PARAMETERS
1419    pDev           [ I ] - Pointer to usbnet device
1420    pIntf          [ I ] - Pointer to interface
1421 
1422 RETURN VALUE:
1423    int - 0 for success
1424          Negative errno for error
1425 ===========================================================================*/
1426 static int GobiNetDriverBind(
1427    struct usbnet *         pDev,
1428    struct usb_interface *  pIntf )
1429 {
1430    int numEndpoints;
1431    int endpointIndex;
1432    struct usb_host_endpoint * pEndpoint = NULL;
1433    struct usb_host_endpoint * pIn = NULL;
1434    struct usb_host_endpoint * pOut = NULL;
1435 
1436    // Verify one altsetting
1437    if (pIntf->num_altsetting != 1)
1438    {
1439       DBG( "invalid num_altsetting %u\n", pIntf->num_altsetting );
1440       return -ENODEV;
1441    }
1442 
1443    // Verify correct interface (4 for UC20)
1444    if ( !test_bit(pIntf->cur_altsetting->desc.bInterfaceNumber, &pDev->driver_info->data))
1445    {
1446       DBG( "invalid interface %d\n",
1447            pIntf->cur_altsetting->desc.bInterfaceNumber );
1448       return -ENODEV;
1449    }
1450 
1451    if ( pIntf->cur_altsetting->desc.bInterfaceClass != 0xff)
1452    {
1453       struct usb_interface_descriptor *desc = &pIntf->cur_altsetting->desc;
1454       const char *qcfg_usbnet = "UNKNOW";
1455 
1456       if (desc->bInterfaceClass == 2 && desc->bInterfaceSubClass == 0x0e) {
1457          qcfg_usbnet = "MBIM";
1458       } else if (desc->bInterfaceClass == 2 && desc->bInterfaceSubClass == 0x06) {
1459          qcfg_usbnet = "ECM";
1460       } else if (desc->bInterfaceClass == 0xe0 && desc->bInterfaceSubClass == 1 && desc->bInterfaceProtocol == 3) {
1461          qcfg_usbnet = "RNDIS";
1462       }
1463 
1464       INFO( "usbnet is %s not NDIS/RMNET!\n", qcfg_usbnet);
1465 
1466       return -ENODEV;
1467    }
1468 
1469    // Collect In and Out endpoints
1470    numEndpoints = pIntf->cur_altsetting->desc.bNumEndpoints;
1471    for (endpointIndex = 0; endpointIndex < numEndpoints; endpointIndex++)
1472    {
1473       pEndpoint = pIntf->cur_altsetting->endpoint + endpointIndex;
1474       if (pEndpoint == NULL)
1475       {
1476          DBG( "invalid endpoint %u\n", endpointIndex );
1477          return -ENODEV;
1478       }
1479 
1480       if (usb_endpoint_dir_in( &pEndpoint->desc ) == true
1481       &&  usb_endpoint_xfer_int( &pEndpoint->desc ) == false)
1482       {
1483          pIn = pEndpoint;
1484       }
1485       else if (usb_endpoint_dir_out( &pEndpoint->desc ) == true)
1486       {
1487          pOut = pEndpoint;
1488       }
1489    }
1490 
1491    if (pIn == NULL || pOut == NULL)
1492    {
1493       DBG( "invalid endpoints\n" );
1494       return -ENODEV;
1495    }
1496 
1497    if (usb_set_interface( pDev->udev,
1498                           pIntf->cur_altsetting->desc.bInterfaceNumber,
1499                           0 ) != 0)
1500    {
1501       DBG( "unable to set interface\n" );
1502       return -ENODEV;
1503    }
1504 
1505    pDev->in = usb_rcvbulkpipe( pDev->udev,
1506                    pIn->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK );
1507    pDev->out = usb_sndbulkpipe( pDev->udev,
1508                    pOut->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK );
1509 
1510 #if defined(QUECTEL_WWAN_MULTI_PACKAGES)
1511     if (rx_packets && pDev->udev->descriptor.idVendor == cpu_to_le16(0x2C7C)) {
1512         struct multi_package_config rx_config = {
1513             .enable = cpu_to_le32(1),
1514             .package_max_len = cpu_to_le32((1500 + sizeof(struct quec_net_package_header)) * rx_packets),
1515             .package_max_count_in_queue = cpu_to_le32(rx_packets),
1516             .timeout = cpu_to_le32(10*1000), //10ms
1517         };
1518         int ret = 0;
1519 
1520     	ret = usb_control_msg(
1521     		interface_to_usbdev(pIntf),
1522     		usb_sndctrlpipe(interface_to_usbdev(pIntf), 0),
1523     		USB_CDC_SET_MULTI_PACKAGE_COMMAND,
1524     		0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
1525     		1,
1526     		pIntf->cur_altsetting->desc.bInterfaceNumber,
1527     		&rx_config, sizeof(rx_config), 100);
1528 
1529         DBG( "Quectel EC21&EC25 rx_packets=%d, ret=%d\n", rx_packets, ret);
1530         if (ret == sizeof(rx_config)) {
1531            pDev->rx_urb_size = le32_to_cpu(rx_config.package_max_len);
1532         } else {
1533             rx_packets = 0;
1534         }
1535     }
1536 #endif
1537 
1538 #if 1 //def DATA_MODE_RP
1539     /* make MAC addr easily distinguishable from an IP header */
1540     if ((pDev->net->dev_addr[0] & 0xd0) == 0x40) {
1541         /* clearing this bit will make the usbnet adapter be named usbX (instead of ethX) */
1542         pDev->net->dev_addr[0] |= 0x02;	/* set local assignment bit */
1543         pDev->net->dev_addr[0] &= 0xbf;	/* clear "IP" bit */
1544     }
1545     memcpy (pDev->net->dev_addr, node_id, sizeof node_id);
1546     pDev->net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
1547     pDev->net->features |= (NETIF_F_VLAN_CHALLENGED);
1548 #endif
1549 
1550 	ql_net_ethtool_ops = *pDev->net->ethtool_ops;
1551 	ql_net_ethtool_ops.get_drvinfo = ql_net_get_drvinfo;
1552 	pDev->net->ethtool_ops = &ql_net_ethtool_ops;
1553 
1554    DBG( "in %x, out %x\n",
1555         pIn->desc.bEndpointAddress,
1556         pOut->desc.bEndpointAddress );
1557 
1558    // In later versions of the kernel, usbnet helps with this
1559 #if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,23 ))
1560    pIntf->dev.platform_data = (void *)pDev;
1561 #endif
1562 
1563     if (qcrmcall_mode == 0 && pDev->net->sysfs_groups[0] == NULL && gobinet_sysfs_attr_group.attrs[0] != NULL) {
1564 #if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,32)) //see commit 0c509a6c9393b27a8c5a01acd4a72616206cfc24
1565         pDev->net->sysfs_groups[1] = &gobinet_sysfs_attr_group; //see netdev_register_sysfs()
1566 #else
1567         pDev->net->sysfs_groups[0] = &gobinet_sysfs_attr_group;
1568 #endif
1569     }
1570 
1571     if (!pDev->rx_urb_size) {
1572 // Avoid the case where the module reports MTU 1460 but receives 1500-byte IP packets, which can crash the customer's system.
1573 // The setting below keeps usbnet.c:usbnet_change_mtu() from modifying rx_urb_size according to the MTU.
1574         pDev->rx_urb_size = ETH_DATA_LEN + ETH_HLEN + 6;
1575     }
1576 
1577    return 0;
1578 }
1579 
1580 /*===========================================================================
1581 METHOD:
1582    GobiNetDriverUnbind (Public Method)
1583 
1584 DESCRIPTION:
1585    Deregisters QMI device (Registration happened in the probe function)
1586 
1587 PARAMETERS
1588    pDev           [ I ] - Pointer to usbnet device
1589    pIntfUnused    [ I ] - Pointer to interface
1590 
1591 RETURN VALUE:
1592    None
1593 ===========================================================================*/
1594 static void GobiNetDriverUnbind(
1595    struct usbnet *         pDev,
1596    struct usb_interface *  pIntf)
1597 {
1598    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
1599 
1600    // Should already be down, but just in case...
1601    netif_carrier_off( pDev->net );
1602 
1603    if (pGobiDev->m_qcrmcall_mode) {
1604    } else {
1605       DeregisterQMIDevice( pGobiDev );
1606    }
1607 
1608 #if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,29 ))
1609    kfree( pDev->net->netdev_ops );
1610    pDev->net->netdev_ops = NULL;
1611 #endif
1612 
1613 #if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,23 ))
1614    pIntf->dev.platform_data = NULL;
1615 #endif
1616 
1617 #if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,19 ))
1618    pIntf->needs_remote_wakeup = 0;
1619 #endif
1620 
1621    if (atomic_dec_and_test(&pGobiDev->refcount))
1622       kfree( pGobiDev );
1623    else
1624       INFO("memory leak!\n");
1625 }
1626 
1627 #if 1 //def DATA_MODE_RP
1628 
1629 #if defined(QUECTEL_WWAN_QMAP)
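/* De-multiplex a received URB containing concatenated QMAP frames: validate
 * each header, pick the virtual netdev by mux_id, and deliver the inner IP
 * packet (adding a synthetic Ethernet header for ARPHRD_ETHER netdevs). */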
1630 static void _rmnet_usb_rx_handler(struct usbnet *dev, struct sk_buff *skb_in)
1631 {
1632 	sGobiUSBNet * pQmapDev = (sGobiUSBNet *)dev->data[0];
1633 	struct sk_buff *qmap_skb;
1634 	struct sk_buff_head skb_chain;
1635 	uint dl_minimum_padding = 0;
1636 
1637 #if defined(QUECTEL_UL_DATA_AGG)
1638 	if (pQmapDev->qmap_version == 9)
1639 		dl_minimum_padding = pQmapDev->agg_ctx.dl_minimum_padding;
1640 #endif
1641 
1642 	__skb_queue_head_init(&skb_chain);
1643 
1644 	while (skb_in->len > sizeof(struct qmap_hdr)) {
1645 		struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb_in->data;
1646 		struct rmnet_map_v5_csum_header *ul_header = NULL;
1647 		size_t hdr_size = sizeof(struct rmnet_map_header);
1648 		struct net_device *qmap_net;
1649 		int pkt_len = ntohs(map_header->pkt_len);
1650 		int skb_len;
1651 		__be16 protocol;
1652 		int mux_id;
1653 
1654 		if (map_header->next_hdr) {
1655 			ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
1656 			hdr_size += sizeof(struct rmnet_map_v5_csum_header);
1657 		}
1658 
1659 		skb_len = pkt_len - (map_header->pad_len&0x3F);
1660 		skb_len -= dl_minimum_padding;
1661 		if (skb_len > 1500) {
1662 			dev_info(&dev->net->dev, "drop skb_len=%x larger than 1500\n", skb_len);
1663 			goto error_pkt;
1664 		}
1665 
1666 		if (skb_in->len < (pkt_len + hdr_size)) {
1667 			dev_info(&dev->net->dev, "drop qmap unknow pkt, len=%d, pkt_len=%d\n", skb_in->len, pkt_len);
1668 			goto error_pkt;
1669 		}
1670 
1671 		if (map_header->cd_bit) {
1672 			dev_info(&dev->net->dev, "skip qmap command packet\n");
1673 			goto skip_pkt;
1674 		}
1675 
1676 		switch (skb_in->data[hdr_size] & 0xf0) {
1677 			case 0x40:
1678 				protocol = htons(ETH_P_IP);
1679 			break;
1680 			case 0x60:
1681 				protocol = htons(ETH_P_IPV6);
1682 			break;
1683 			default:
1684 				dev_info(&dev->net->dev, "unknow skb->protocol %02x\n", skb_in->data[hdr_size]);
1685 				goto error_pkt;
1686 		}
1687 
1688 		mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
1689 		if (mux_id >= pQmapDev->qmap_mode) {
1690 			dev_info(&dev->net->dev, "drop qmap unknow mux_id %x\n", map_header->mux_id);
1691 			goto error_pkt;
1692 		}
1693 
1694 		qmap_net = pQmapDev->mpQmapNetDev[mux_id];
1695 
1696 		if (qmap_net == NULL) {
1697 			dev_info(&dev->net->dev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
1698 			goto skip_pkt;
1699 		}
1700 
1701 		qmap_skb = netdev_alloc_skb(qmap_net, skb_len);
1702 		if (qmap_skb) {
1703 			skb_put(qmap_skb, skb_len);
1704 			memcpy(qmap_skb->data, skb_in->data + hdr_size, skb_len);
1705 		}
1706 
1707 		if (qmap_skb == NULL) {
1708 			dev_info(&dev->net->dev, "fail to alloc skb, pkt_len = %d\n", skb_len);
1709 			goto error_pkt;
1710 		}
1711 
1712 		skb_reset_transport_header(qmap_skb);
1713 		skb_reset_network_header(qmap_skb);
1714 		qmap_skb->pkt_type = PACKET_HOST;
1715 		skb_set_mac_header(qmap_skb, 0);
1716 		qmap_skb->protocol = protocol;
1717 
1718 		if (ul_header && ul_header->header_type == RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD
1719 			&& ul_header->csum_valid_required) {
1720 #if 0 //TODO
1721 			qmap_skb->ip_summed = CHECKSUM_UNNECESSARY;
1722 #endif
1723 		}
1724 
1725 		if (qmap_skb->dev->type == ARPHRD_ETHER) {
1726 			skb_push(qmap_skb, ETH_HLEN);
1727 			skb_reset_mac_header(qmap_skb);
1728 			memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN);
1729 			memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN);
1730 			eth_hdr(qmap_skb)->h_proto = protocol;
1731 #ifdef QUECTEL_BRIDGE_MODE
1732 			bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb);
1733 #endif
1734 		}
1735 
1736 		__skb_queue_tail(&skb_chain, qmap_skb);
1737 
1738 skip_pkt:
1739 		skb_pull(skb_in, pkt_len + hdr_size);
1740 	}
1741 
1742 error_pkt:
1743 	while ((qmap_skb = __skb_dequeue (&skb_chain))) {
1744 		if (qmap_skb->dev != dev->net) {
1745 			if (qmap_skb->dev->type == ARPHRD_ETHER)
1746 				__skb_pull(qmap_skb, ETH_HLEN);
1747 			rmnet_vnd_update_rx_stats(qmap_skb->dev, 1, qmap_skb->len);
1748 			netif_rx(qmap_skb);
1749 		}
1750 		else {
1751 			qmap_skb->protocol = 0;
1752 			usbnet_skb_return(dev, qmap_skb);
1753 		}
1754 	}
1755 }
1756 
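/*
 * Two build-time variants of the netdev rx_handler follow: kernels between
 * 2.6.36 and 2.6.38 expect the handler to return the skb (or NULL once it
 * has been consumed), while 2.6.39+ expects an rx_handler_result_t
 * (RX_HANDLER_PASS / RX_HANDLER_CONSUMED).  Both variants only intercept
 * ETH_P_MAP frames belonging to this usbnet device and feed them to
 * _rmnet_usb_rx_handler() above.
 */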
1757 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,35 )) //ab95bfe01f9872459c8678572ccadbf646badad0
1758 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,39 )) //8a4eb5734e8d1dc60a8c28576bbbdfdcc643626d
1759 static struct sk_buff* rmnet_usb_rx_handler(struct sk_buff *skb)
1760 {
1761 	struct usbnet *dev;
1762 
1763 	if (!skb)
1764 		goto done;
1765 
1766 	//printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len);
1767 
1768 	if (skb->pkt_type == PACKET_LOOPBACK)
1769 		return skb;
1770 
1771 	if (skb->protocol != htons(ETH_P_MAP)) {
1772 		WARN_ON(1);
1773 		return skb;
1774 	}
1775 
1776 	dev = netdev_priv(skb->dev);
1777 
1778 	if (dev == NULL) {
1779 		WARN_ON(1);
1780 		return skb;
1781 	}
1782 
1783 	_rmnet_usb_rx_handler(dev, skb);
1784 	consume_skb(skb);
1785 
1786 done:
1787 	return NULL;
1788 }
1789 #else
1790 static rx_handler_result_t rmnet_usb_rx_handler(struct sk_buff **pskb)
1791 {
1792 	struct sk_buff *skb = *pskb;
1793 	struct usbnet *dev;
1794 
1795 	if (!skb)
1796 		goto done;
1797 
1798 	//printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len);
1799 
1800 	if (skb->pkt_type == PACKET_LOOPBACK)
1801 		return RX_HANDLER_PASS;
1802 
1803 	if (skb->protocol != htons(ETH_P_MAP)) {
1804 		WARN_ON(1);
1805 		return RX_HANDLER_PASS;
1806 	}
1807 
1808 	dev = netdev_priv(skb->dev);
1809 
1810 	if (dev == NULL) {
1811 		WARN_ON(1);
1812 		return RX_HANDLER_PASS;
1813 	}
1814 
1815 	_rmnet_usb_rx_handler(dev, skb);
1816 	consume_skb(skb);
1817 
1818 done:
1819 	return RX_HANDLER_CONSUMED;
1820 }
1821 #endif
1822 #endif
1823 #endif
1824 /*===========================================================================
1825 METHOD:
1826    GobiNetDriverTxFixup (Public Method)
1827 
1828 DESCRIPTION:
1829    Handling data format mode on transmit path
1830 
1831 PARAMETERS
1832    dev            [ I ] - Pointer to usbnet device
1833    skb            [ I ] - Pointer to transmit packet buffer
1834    flags          [ I ] - Memory allocation (GFP) flags
1835 
1836 RETURN VALUE:
1837    struct sk_buff * - skb ready for transmit, or NULL if the packet was dropped
1838 ===========================================================================*/
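/*
 * Illustrative TX transforms performed below (raw-IP mode assumed):
 *
 *   qmap_mode == 1 : [Ethernet hdr][IP packet] from the stack becomes
 *                    [qmap hdr][IP packet] via add_qhdr()/add_qhdr_v5()
 *                    (the Ethernet header, if present, is stripped first).
 *   qmap_mode  > 1 : the per-PDN virtual devices already emit ETH_P_MAP
 *                    frames, so those are passed through unchanged.
 *   qmap_mode == 0 : only the Ethernet header is stripped
 *                    (ether_to_ip_fixup) before the IP packet is sent.
 *
 * add_qhdr()/add_qhdr_v5() are defined elsewhere in this driver; this is a
 * sketch of the intent, not of the exact header contents.
 */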
1839 static struct sk_buff *GobiNetDriverTxFixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
1840 {
1841 	sGobiUSBNet * pGobiDev = (sGobiUSBNet *)dev->data[0];
1842 
1843 	if (!pGobiDev) {
1844 		DBG( "failed to get QMIDevice\n" );
1845 		dev_kfree_skb_any(skb);
1846 		return NULL;
1847 	}
1848 
1849 	if (unlikely(!skb)) {
1850 		return NULL;
1851 	}
1852 
1853 	if (!pGobiDev->mbRawIPMode)
1854 		return skb;
1855 
1856 #ifdef QUECTEL_WWAN_QMAP
1857 	if (pGobiDev->qmap_mode > 1) {
1858 		if (skb->protocol == htons(ETH_P_MAP))
1859 			return skb;
1860 
1861 		goto drop_skb;
1862 	}
1863 	else if (pGobiDev->qmap_mode == 1) {
1864 		if (unlikely(!pGobiDev->link_state)) {
1865 			dev_info(&dev->net->dev, "link_state 0x%x, drop skb, len = %u\n", pGobiDev->link_state, skb->len);
1866 			goto drop_skb;
1867 		}
1868 
1869 		if (dev->net->type == ARPHRD_ETHER) {
1870 #ifdef QUECTEL_BRIDGE_MODE
1871 			if (pGobiDev->m_bridge_mode && bridge_mode_tx_fixup(dev->net, skb, pGobiDev->m_bridge_ipv4, pGobiDev->mHostMAC) == NULL) {
1872 				goto drop_skb;
1873 			}
1874 #endif
1875 
1876 			if (ether_to_ip_fixup(dev->net, skb) == NULL)
1877 				goto drop_skb;
1878 		}
1879 
1880 		if (pGobiDev->qmap_version == 5) {
1881 			add_qhdr(skb, QUECTEL_QMAP_MUX_ID);
1882 		}
1883 		else if (pGobiDev->qmap_version == 9) {
1884 			add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID);
1885 		}
1886 		else {
1887 			goto drop_skb;
1888 		}
1889 
1890 		return skb;
1891 	}
1892 #endif
1893 
1894 #ifdef QUECTEL_BRIDGE_MODE
1895 	if (pGobiDev->m_bridge_mode && bridge_mode_tx_fixup(dev->net, skb, pGobiDev->m_bridge_ipv4, pGobiDev->mHostMAC) == NULL) {
1896 		goto drop_skb;
1897 	}
1898 #endif
1899 
1900     // Skip Ethernet header from message
1901 	if (likely(ether_to_ip_fixup(dev->net, skb))) {
1902 		return skb;
1903 	}
1904 	else {
1905 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 ))
1906 		dev_err(&dev->intf->dev,  "Packet Dropped ");
1907 #elif (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
1908 		dev_err(dev->net->dev.parent,  "Packet Dropped ");
1909 #else
1910 		INFO("Packet Dropped ");
1911 #endif
1912 	}
1913 
1914 #if defined(QUECTEL_WWAN_QMAP)
1915 drop_skb:
1916 #endif
1917 #if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,24 )) && defined(CONFIG_X86_32)
1918 	INFO("dev_kfree_skb_any() will make kernel panic on CentOS!\n");
1919 	quec_debug=1;PrintHex(skb->data, 32);quec_debug=0;
1920 #else
1921 	// Filter the packet out, release it
1922 	dev_kfree_skb_any(skb);
1923 #endif
1924 
1925 	return NULL;
1926 }
1927 
1928 #if defined(QUECTEL_WWAN_MULTI_PACKAGES)
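/*
 * Multi-package RX mode: the module concatenates several IP packets into a
 * single URB, each prefixed by a struct quec_net_package_header carrying
 * msg_spec/msg_id markers and a big-endian payload_len.  The loop below
 * validates each header, clones every payload but the last into its own
 * skb for GobiNetDriverRxFixup(), and handles the final payload in place.
 * The header layout is assumed to live in Structs.h with the other Quectel
 * structures.
 */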
1929 static int GobiNetDriverRxPktsFixup(struct usbnet *dev, struct sk_buff *skb)
1930 {
1931     sGobiUSBNet * pGobiDev = (sGobiUSBNet *)dev->data[0];
1932 
1933     if (!pGobiDev->mbRawIPMode)
1934         return 1;
1935 
1936     /* This check is no longer done by usbnet */
1937     if (skb->len < dev->net->hard_header_len)
1938         return 0;
1939 
1940     if (!rx_packets) {
1941         return GobiNetDriverRxFixup(dev, skb);
1942     }
1943 
1944     while (likely(skb->len)) {
1945         struct sk_buff* new_skb;
1946         struct quec_net_package_header package_header;
1947 
1948         if (skb->len < sizeof(package_header))
1949             return 0;
1950 
1951         memcpy(&package_header, skb->data, sizeof(package_header));
1952         package_header.payload_len = be16_to_cpu(package_header.payload_len);
1953 
1954         if (package_header.msg_spec != QUEC_NET_MSG_SPEC || package_header.msg_id != QUEC_NET_MSG_ID_IP_DATA)
1955             return 0;
1956 
1957         if (skb->len < (package_header.payload_len + sizeof(package_header)))
1958             return 0;
1959 
1960         skb_pull(skb, sizeof(package_header));
1961 
1962         if (skb->len == package_header.payload_len)
1963             return GobiNetDriverRxFixup(dev, skb);
1964 
1965         new_skb = skb_clone(skb, GFP_ATOMIC);
1966         if (new_skb) {
1967             skb_trim(new_skb, package_header.payload_len);
1968             if (GobiNetDriverRxFixup(dev, new_skb))
1969                 usbnet_skb_return(dev, new_skb);
1970             else
1971                 return 0;
1972         }
1973 
1974         skb_pull(skb, package_header.payload_len);
1975     }
1976 
1977     return 0;
1978 }
1979 #endif
1980 
1981 #ifdef QUECTEL_WWAN_QMAP
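/*
 * QMAP RX entry point called from GobiNetDriverRxFixup().  If
 * rmnet_usb_rx_handler() is registered on the net device, the aggregate is
 * re-wrapped in a fake Ethernet header (source = default_modem_addr,
 * type = ETH_P_MAP) so usbnet_skb_return()/eth_type_trans() hand it to that
 * handler; otherwise the frames are de-aggregated right here through
 * _rmnet_usb_rx_handler() and the original skb is dropped (return 0).
 */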
1982 static int GobiNetDriverRxQmapFixup(struct usbnet *dev, struct sk_buff *skb)
1983 {
1984 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,35 )) //ab95bfe01f9872459c8678572ccadbf646badad0
1985 	rx_handler_func_t *rx_handler;
1986 
1987 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,3,1 )) //7bdd402706cf26bfef9050dfee3f229b7f33ee4f
1988 	if (skb->dev == NULL) {
1989 		skb->dev = dev->net;
1990 	}
1991 #endif
1992 	rx_handler = rcu_dereference(skb->dev->rx_handler);
1993 
1994 	if (rx_handler == rmnet_usb_rx_handler) {
1995 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,3,1 )) //7bdd402706cf26bfef9050dfee3f229b7f33ee4f
1996 		unsigned headroom = skb_headroom(skb);
1997 		if (headroom < ETH_HLEN) {
1998 			unsigned tailroom = skb_tailroom(skb);
1999 			if ((tailroom + headroom) >= ETH_HLEN) {
2000 				unsigned moveroom = ETH_HLEN - headroom;
2001 				memmove(skb->data + moveroom, skb->data, skb->len);
2002 				skb->data += moveroom;
2003 				skb->tail += moveroom;
2004 				#ifdef WARN_ONCE
2005 				WARN_ONCE(1, "It is better to reserve headroom in usbnet.c:rx_submit()!\n");
2006 				#endif
2007 			}
2008 		}
2009 #endif
2010 
2011 		if (dev->net->type == ARPHRD_ETHER && skb_headroom(skb) >= ETH_HLEN) {
2012 			//usbnet.c rx_process() usbnet_skb_return() eth_type_trans()
2013 			skb_push(skb, ETH_HLEN);
2014 			skb_reset_mac_header(skb);
2015 			memcpy(eth_hdr(skb)->h_source, default_modem_addr, ETH_ALEN);
2016 			memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
2017 			eth_hdr(skb)->h_proto = htons(ETH_P_MAP);
2018 
2019 			return 1;
2020 		}
2021 
2022 #ifdef WARN_ONCE
2023 		WARN_ONCE(1, "skb_headroom < ETH_HLEN\n");
2024 #endif
2025 		return 0;
2026 	}
2027 #endif
2028 
2029 	_rmnet_usb_rx_handler(dev, skb);
2030 	return 0;
2031 }
2032 #endif
2033 /*===========================================================================
2034 METHOD:
2035    GobiNetDriverRxFixup (Public Method)
2036 
2037 DESCRIPTION:
2038    Handling data format mode on receive path
2039 
2040 PARAMETERS
2041    dev            [ I ] - Pointer to usbnet device
2042    skb            [ I ] - Pointer to received packet buffer
2043 
2044 RETURN VALUE:
2045    int - 1 to pass the skb up the stack, 0 to have usbnet drop it
2046 ===========================================================================*/
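/*
 * In raw-IP mode the modem delivers bare IPv4/IPv6 packets, so an Ethernet
 * header is synthesized below before the skb reaches the stack, e.g. for
 * IPv4:
 *
 *   [IP packet]  ->  [dst = dev_addr | src = ec20_mac | 0x0800][IP packet]
 *
 * (in QUECTEL_BRIDGE_MODE the destination is rewritten for the bridged host
 * instead).  Returning 1 keeps the skb, returning 0 drops it.
 */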
2047 static int GobiNetDriverRxFixup(struct usbnet *dev, struct sk_buff *skb)
2048 {
2049     __be16 proto;
2050     sGobiUSBNet * pGobiDev = (sGobiUSBNet *)dev->data[0];
2051 
2052     if (!pGobiDev->mbRawIPMode)
2053         return 1;
2054 
2055     /* This check is no longer done by usbnet */
2056     if (skb->len < dev->net->hard_header_len)
2057         return 0;
2058 
2059 #ifdef QUECTEL_WWAN_QMAP
2060     if (pGobiDev->qmap_mode) {
2061         return GobiNetDriverRxQmapFixup(dev, skb);
2062     }
2063 #endif
2064 
2065     switch (skb->data[0] & 0xf0) {
2066     case 0x40:
2067     	proto = htons(ETH_P_IP);
2068     	break;
2069     case 0x60:
2070     	proto = htons(ETH_P_IPV6);
2071     	break;
2072     case 0x00:
2073     	if (is_multicast_ether_addr(skb->data))
2074     		return 1;
2075     	/* possibly bogus destination - rewrite just in case */
2076     	skb_reset_mac_header(skb);
2077     	goto fix_dest;
2078     default:
2079     	/* pass along other packets without modifications */
2080     	return 1;
2081     }
2082     if (skb_headroom(skb) < ETH_HLEN && pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
2083         DBG("%s: couldn't pskb_expand_head\n", __func__);
2084         return 0;
2085     }
2086     skb_push(skb, ETH_HLEN);
2087     skb_reset_mac_header(skb);
2088     eth_hdr(skb)->h_proto = proto;
2089     memcpy(eth_hdr(skb)->h_source, ec20_mac, ETH_ALEN);
2090 fix_dest:
2091 #ifdef QUECTEL_BRIDGE_MODE
2092 	bridge_mode_rx_fixup(pGobiDev, dev->net, skb);
2093 #else
2094 	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
2095 #endif
2096 
2097 #ifdef QUECTEL_BRIDGE_MODE
2098 #if 0
2099     if (pGobiDev->m_bridge_mode) {
2100         struct ethhdr *ehdr = eth_hdr(skb);
2101 quec_debug = 1;
2102         DBG(": ");
2103         PrintHex(ehdr, sizeof(struct ethhdr));
2104 quec_debug = 0;
2105     }
2106 #endif
2107 #endif
2108 
2109     return 1;
2110 }
2111 #endif
2112 
2113 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2114 #ifdef CONFIG_PM
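/*
 * Pre-2.6.29 TX path with runtime PM: GobiUSBNetStartXmit() copies each
 * outgoing skb into its own URB and appends it to mpURBList, then wakes
 * GobiUSBNetAutoPMThread().  The thread resumes the interface with
 * usb_autopm_get_interface(), submits one URB at a time and drops the PM
 * reference again once GobiUSBNetURBCallback() reports completion.  On
 * 2.6.29+ kernels this whole block is compiled out and usbnet's own
 * transmit path is used instead.
 */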
2115 /*===========================================================================
2116 METHOD:
2117    GobiUSBNetURBCallback (Public Method)
2118 
2119 DESCRIPTION:
2120    Write is complete, cleanup and signal that we're ready for next packet
2121 
2122 PARAMETERS
2123    pURB     [ I ] - Pointer to completed URB; its context field holds the sAutoPM struct
2124 
2125 RETURN VALUE:
2126    None
2127 ===========================================================================*/
2128 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
2129 void GobiUSBNetURBCallback( struct urb * pURB )
2130 #else
2131 void GobiUSBNetURBCallback(struct urb *pURB, struct pt_regs *regs)
2132 #endif
2133 {
2134    unsigned long activeURBflags;
2135    sAutoPM * pAutoPM = (sAutoPM *)pURB->context;
2136    if (pAutoPM == NULL)
2137    {
2138       // Should never happen
2139       DBG( "bad context\n" );
2140       return;
2141    }
2142 
2143    if (pURB->status != 0)
2144    {
2145       // Note that in case of an error, the behaviour is no different
2146       DBG( "urb finished with error %d\n", pURB->status );
2147    }
2148 
2149    // Remove activeURB (memory to be freed later)
2150    spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2151 
2152    // EAGAIN used to signify callback is done
2153    pAutoPM->mpActiveURB = ERR_PTR( -EAGAIN );
2154 
2155    spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2156 
2157    complete( &pAutoPM->mThreadDoWork );
2158 
2159 #ifdef URB_FREE_BUFFER_BY_SELF
2160     if (pURB->transfer_flags & URB_FREE_BUFFER)
2161         kfree(pURB->transfer_buffer);
2162 #endif
2163    usb_free_urb( pURB );
2164 }
2165 
2166 /*===========================================================================
2167 METHOD:
2168    GobiUSBNetTXTimeout (Public Method)
2169 
2170 DESCRIPTION:
2171    Timeout declared by the net driver.  Stop all transfers
2172 
2173 PARAMETERS
2174    pNet     [ I ] - Pointer to net device
2175 
2176 RETURN VALUE:
2177    None
2178 ===========================================================================*/
2179 void GobiUSBNetTXTimeout( struct net_device * pNet )
2180 {
2181    struct sGobiUSBNet * pGobiDev;
2182    sAutoPM * pAutoPM;
2183    sURBList * pURBListEntry;
2184    unsigned long activeURBflags, URBListFlags;
2185    struct usbnet * pDev = netdev_priv( pNet );
2186    struct urb * pURB;
2187 
2188    if (pDev == NULL || pDev->net == NULL)
2189    {
2190       DBG( "failed to get usbnet device\n" );
2191       return;
2192    }
2193 
2194    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2195    if (pGobiDev == NULL)
2196    {
2197       DBG( "failed to get QMIDevice\n" );
2198       return;
2199    }
2200    pAutoPM = &pGobiDev->mAutoPM;
2201 
2202    DBG( "\n" );
2203 
2204    // Grab a pointer to active URB
2205    spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2206    pURB = pAutoPM->mpActiveURB;
2207    spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2208    // Stop active URB
2209    if (pURB != NULL)
2210    {
2211       usb_kill_urb( pURB );
2212    }
2213 
2214    // Cleanup URB List
2215    spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2216 
2217    pURBListEntry = pAutoPM->mpURBList;
2218    while (pURBListEntry != NULL)
2219    {
2220       pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
2221       atomic_dec( &pAutoPM->mURBListLen );
2222       usb_free_urb( pURBListEntry->mpURB );
2223       kfree( pURBListEntry );
2224       pURBListEntry = pAutoPM->mpURBList;
2225    }
2226 
2227    spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2228 
2229    complete( &pAutoPM->mThreadDoWork );
2230 
2231    return;
2232 }
2233 
2234 /*===========================================================================
2235 METHOD:
2236    GobiUSBNetAutoPMThread (Public Method)
2237 
2238 DESCRIPTION:
2239    Handle device Auto PM state asynchronously
2240    Handle network packet transmission asynchronously
2241 
2242 PARAMETERS
2243    pData     [ I ] - Pointer to sAutoPM struct
2244 
2245 RETURN VALUE:
2246    int - 0 for success
2247          Negative errno for error
2248 ===========================================================================*/
2249 static int GobiUSBNetAutoPMThread( void * pData )
2250 {
2251    unsigned long activeURBflags, URBListFlags;
2252    sURBList * pURBListEntry;
2253    int status;
2254    struct usb_device * pUdev;
2255    sAutoPM * pAutoPM = (sAutoPM *)pData;
2256    struct urb * pURB;
2257 
2258    if (pAutoPM == NULL)
2259    {
2260       DBG( "passed null pointer\n" );
2261       return -EINVAL;
2262    }
2263 
2264    pUdev = interface_to_usbdev( pAutoPM->mpIntf );
2265 
2266    DBG( "traffic thread started\n" );
2267 
2268    while (pAutoPM->mbExit == false)
2269    {
2270       // Wait for someone to poke us
2271       wait_for_completion_interruptible( &pAutoPM->mThreadDoWork );
2272 
2273       // Time to exit?
2274       if (pAutoPM->mbExit == true)
2275       {
2276          // Stop activeURB
2277          spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2278          pURB = pAutoPM->mpActiveURB;
2279          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2280 
2281          // EAGAIN used to signify callback is done
2282          if (IS_ERR( pAutoPM->mpActiveURB )
2283                  &&  PTR_ERR( pAutoPM->mpActiveURB ) == -EAGAIN )
2284          {
2285              pURB = NULL;
2286          }
2287 
2288          if (pURB != NULL)
2289          {
2290             usb_kill_urb( pURB );
2291          }
2292          // Will be freed in callback function
2293 
2294          // Cleanup URB List
2295          spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2296 
2297          pURBListEntry = pAutoPM->mpURBList;
2298          while (pURBListEntry != NULL)
2299          {
2300             pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
2301             atomic_dec( &pAutoPM->mURBListLen );
2302             usb_free_urb( pURBListEntry->mpURB );
2303             kfree( pURBListEntry );
2304             pURBListEntry = pAutoPM->mpURBList;
2305          }
2306 
2307          spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2308 
2309          break;
2310       }
2311 
2312       // Is our URB active?
2313       spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2314 
2315       // EAGAIN used to signify callback is done
2316       if (IS_ERR( pAutoPM->mpActiveURB )
2317       &&  PTR_ERR( pAutoPM->mpActiveURB ) == -EAGAIN )
2318       {
2319          pAutoPM->mpActiveURB = NULL;
2320 
2321          // Restore IRQs so task can sleep
2322          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2323 
2324          // URB is done, decrement the Auto PM usage count
2325          usb_autopm_put_interface( pAutoPM->mpIntf );
2326 
2327          // Lock ActiveURB again
2328          spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2329       }
2330 
2331       if (pAutoPM->mpActiveURB != NULL)
2332       {
2333          // There is already a URB active, go back to sleep
2334          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2335          continue;
2336       }
2337 
2338       // Is there a URB waiting to be submitted?
2339       spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2340       if (pAutoPM->mpURBList == NULL)
2341       {
2342          // No more URBs to submit, go back to sleep
2343          spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2344          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2345          continue;
2346       }
2347 
2348       // Pop an element
2349       pURBListEntry = pAutoPM->mpURBList;
2350       pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
2351       atomic_dec( &pAutoPM->mURBListLen );
2352       spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2353 
2354       // Set ActiveURB
2355       pAutoPM->mpActiveURB = pURBListEntry->mpURB;
2356       spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2357 
2358       // Tell autopm core we need device woken up
2359       status = usb_autopm_get_interface( pAutoPM->mpIntf );
2360       if (status < 0)
2361       {
2362          DBG( "unable to autoresume interface: %d\n", status );
2363 
2364          // likely caused by device going from autosuspend -> full suspend
2365          if (status == -EPERM)
2366          {
2367 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
2368 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
2369             pUdev->auto_pm = 0;
2370 #else
2371              pUdev = pUdev;
2372 #endif
2373 #endif
2374             GobiNetSuspend( pAutoPM->mpIntf, PMSG_SUSPEND );
2375          }
2376 
2377          // Add pURBListEntry back onto pAutoPM->mpURBList
2378          spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2379          pURBListEntry->mpNext = pAutoPM->mpURBList;
2380          pAutoPM->mpURBList = pURBListEntry;
2381          atomic_inc( &pAutoPM->mURBListLen );
2382          spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2383 
2384          spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2385          pAutoPM->mpActiveURB = NULL;
2386          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2387 
2388          // Go back to sleep
2389          continue;
2390       }
2391 
2392       // Submit URB
2393       status = usb_submit_urb( pAutoPM->mpActiveURB, GFP_KERNEL );
2394       if (status < 0)
2395       {
2396          // Could happen for a number of reasons
2397          DBG( "Failed to submit URB: %d.  Packet dropped\n", status );
2398          spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2399          usb_free_urb( pAutoPM->mpActiveURB );
2400          pAutoPM->mpActiveURB = NULL;
2401          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2402          usb_autopm_put_interface( pAutoPM->mpIntf );
2403 
2404          // Loop again
2405          complete( &pAutoPM->mThreadDoWork );
2406       }
2407 
2408       kfree( pURBListEntry );
2409    }
2410 
2411    DBG( "traffic thread exiting\n" );
2412    pAutoPM->mpThread = NULL;
2413    return 0;
2414 }
2415 
2416 /*===========================================================================
2417 METHOD:
2418    GobiUSBNetStartXmit (Public Method)
2419 
2420 DESCRIPTION:
2421    Convert sk_buff to usb URB and queue for transmit
2422 
2423 PARAMETERS
2424    pNet     [ I ] - Pointer to net device
2425 
2426 RETURN VALUE:
2427    NETDEV_TX_OK on success
2428    NETDEV_TX_BUSY on error
2429 ===========================================================================*/
2430 int GobiUSBNetStartXmit(
2431    struct sk_buff *     pSKB,
2432    struct net_device *  pNet )
2433 {
2434    unsigned long URBListFlags;
2435    struct sGobiUSBNet * pGobiDev;
2436    sAutoPM * pAutoPM;
2437    sURBList * pURBListEntry, ** ppURBListEnd;
2438    void * pURBData;
2439    struct usbnet * pDev = netdev_priv( pNet );
2440 
2441    //DBG( "\n" );
2442 
2443    if (pDev == NULL || pDev->net == NULL)
2444    {
2445       DBG( "failed to get usbnet device\n" );
2446       return NETDEV_TX_BUSY;
2447    }
2448 
2449    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2450    if (pGobiDev == NULL)
2451    {
2452       DBG( "failed to get QMIDevice\n" );
2453       return NETDEV_TX_BUSY;
2454    }
2455    pAutoPM = &pGobiDev->mAutoPM;
2456 
2457    if( NULL == pSKB )
2458    {
2459        DBG( "Buffer is NULL \n" );
2460        return NETDEV_TX_BUSY;
2461    }
2462 
2463    if (GobiTestDownReason( pGobiDev, DRIVER_SUSPENDED ))
2464    {
2465       // Should not happen
2466       DBG( "device is suspended\n" );
2467       dump_stack();
2468       return NETDEV_TX_BUSY;
2469    }
2470 
2471    if (GobiTestDownReason( pGobiDev, NO_NDIS_CONNECTION ))
2472    {
2473       //netif_carrier_off( pGobiDev->mpNetDev->net );
2474       //DBG( "device is disconnected\n" );
2475       //dump_stack();
2476       return NETDEV_TX_BUSY;
2477    }
2478 
2479    // Convert the sk_buff into a URB
2480 
2481    // Check if buffer is full
2482    if ( atomic_read( &pAutoPM->mURBListLen ) >= txQueueLength)
2483    {
2484       DBG( "not scheduling request, buffer is full\n" );
2485       return NETDEV_TX_BUSY;
2486    }
2487 
2488    // Allocate URBListEntry
2489    pURBListEntry = kmalloc( sizeof( sURBList ), GFP_ATOMIC );
2490    if (pURBListEntry == NULL)
2491    {
2492       DBG( "unable to allocate URBList memory\n" );
2493       return NETDEV_TX_BUSY;
2494    }
2495    pURBListEntry->mpNext = NULL;
2496 
2497    // Allocate URB
2498    pURBListEntry->mpURB = usb_alloc_urb( 0, GFP_ATOMIC );
2499    if (pURBListEntry->mpURB == NULL)
2500    {
2501       DBG( "unable to allocate URB\n" );
2502       // release all memory allocated by now
2503       if (pURBListEntry)
2504          kfree( pURBListEntry );
2505       return NETDEV_TX_BUSY;
2506    }
2507 
2508 #if 1 //def DATA_MODE_RP
2509    GobiNetDriverTxFixup(pDev, pSKB, GFP_ATOMIC);
2510 #endif
2511 
2512    // Allocate URB transfer_buffer
2513    pURBData = kmalloc( pSKB->len, GFP_ATOMIC );
2514    if (pURBData == NULL)
2515    {
2516       DBG( "unable to allocate URB data\n" );
2517       // release all memory allocated by now
2518       if (pURBListEntry)
2519       {
2520          usb_free_urb( pURBListEntry->mpURB );
2521          kfree( pURBListEntry );
2522       }
2523       return NETDEV_TX_BUSY;
2524    }
2525    // Fill with SKB's data
2526    memcpy( pURBData, pSKB->data, pSKB->len );
2527 
2528    usb_fill_bulk_urb( pURBListEntry->mpURB,
2529                       pGobiDev->mpNetDev->udev,
2530                       pGobiDev->mpNetDev->out,
2531                       pURBData,
2532                       pSKB->len,
2533                       GobiUSBNetURBCallback,
2534                       pAutoPM );
2535 
2536    /* Handle the need to send a zero length packet and release the
2537     * transfer buffer
2538     */
2539     pURBListEntry->mpURB->transfer_flags |= (URB_ZERO_PACKET | URB_FREE_BUFFER);
2540 
2541    // Acquire lock on URBList
2542    spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2543 
2544    // Add URB to end of list
2545    ppURBListEnd = &pAutoPM->mpURBList;
2546    while ((*ppURBListEnd) != NULL)
2547    {
2548       ppURBListEnd = &(*ppURBListEnd)->mpNext;
2549    }
2550    *ppURBListEnd = pURBListEntry;
2551    atomic_inc( &pAutoPM->mURBListLen );
2552 
2553    spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2554 
2555    complete( &pAutoPM->mThreadDoWork );
2556 
2557    // Start transfer timer
2558    pNet->trans_start = jiffies;
2559    // Free SKB
2560    if (pSKB)
2561       dev_kfree_skb_any( pSKB );
2562 
2563    return NETDEV_TX_OK;
2564 }
2565 #endif
2566 static int (*local_usbnet_start_xmit) (struct sk_buff *skb, struct net_device *net);
2567 #endif
2568 
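/*
 * Thin guard around the regular usbnet transmit path (used when the AutoPM
 * thread above is compiled out): packets are refused with NETDEV_TX_BUSY
 * while the device is suspended or no NDIS/QMI data call is up, which e.g.
 * keeps DHCP requests from going out before the connection exists.
 */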
2569 static int GobiUSBNetStartXmit2( struct sk_buff *pSKB, struct net_device *pNet ){
2570    struct sGobiUSBNet * pGobiDev;
2571    struct usbnet * pDev = netdev_priv( pNet );
2572 
2573    //DBG( "\n" );
2574 
2575    if (pDev == NULL || pDev->net == NULL)
2576    {
2577       DBG( "failed to get usbnet device\n" );
2578       return NETDEV_TX_BUSY;
2579    }
2580 
2581    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2582    if (pGobiDev == NULL)
2583    {
2584       DBG( "failed to get QMIDevice\n" );
2585       return NETDEV_TX_BUSY;
2586    }
2587 
2588    if( NULL == pSKB )
2589    {
2590        DBG( "Buffer is NULL \n" );
2591        return NETDEV_TX_BUSY;
2592    }
2593 
2594    if (GobiTestDownReason( pGobiDev, DRIVER_SUSPENDED ))
2595    {
2596       // Should not happen
2597       DBG( "device is suspended\n" );
2598       dump_stack();
2599       return NETDEV_TX_BUSY;
2600    }
2601 
2602    if (GobiTestDownReason( pGobiDev, NO_NDIS_CONNECTION ))
2603    {
2604       //netif_carrier_off( pGobiDev->mpNetDev->net );
2605       //DBG( "device is disconnected\n" );
2606       //dump_stack();
2607       return NETDEV_TX_BUSY;
2608    }
2609 
2610 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2611    return local_usbnet_start_xmit(pSKB, pNet);
2612 #else
2613    return usbnet_start_xmit(pSKB, pNet);
2614 #endif
2615 }
2616 
2617 /*===========================================================================
2618 METHOD:
2619    GobiUSBNetOpen (Public Method)
2620 
2621 DESCRIPTION:
2622    Wrapper to usbnet_open, correctly handling autosuspend
2623    Start AutoPM thread (if CONFIG_PM is defined)
2624 
2625 PARAMETERS
2626    pNet     [ I ] - Pointer to net device
2627 
2628 RETURN VALUE:
2629    int - 0 for success
2630          Negative errno for error
2631 ===========================================================================*/
2632 static int GobiUSBNetOpen( struct net_device * pNet )
2633 {
2634    int status = 0;
2635    struct sGobiUSBNet * pGobiDev;
2636    struct usbnet * pDev = netdev_priv( pNet );
2637 
2638    if (pDev == NULL)
2639    {
2640       DBG( "failed to get usbnet device\n" );
2641       return -ENXIO;
2642    }
2643 
2644    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2645    if (pGobiDev == NULL)
2646    {
2647       DBG( "failed to get QMIDevice\n" );
2648       return -ENXIO;
2649    }
2650 
2651    DBG( "\n" );
2652 
2653 #ifdef CONFIG_PM
2654    #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2655    // Start the AutoPM thread
2656    pGobiDev->mAutoPM.mpIntf = pGobiDev->mpIntf;
2657    pGobiDev->mAutoPM.mbExit = false;
2658    pGobiDev->mAutoPM.mpURBList = NULL;
2659    pGobiDev->mAutoPM.mpActiveURB = NULL;
2660    spin_lock_init( &pGobiDev->mAutoPM.mURBListLock );
2661    spin_lock_init( &pGobiDev->mAutoPM.mActiveURBLock );
2662    atomic_set( &pGobiDev->mAutoPM.mURBListLen, 0 );
2663    init_completion( &pGobiDev->mAutoPM.mThreadDoWork );
2664 
2665    pGobiDev->mAutoPM.mpThread = kthread_run( GobiUSBNetAutoPMThread,
2666                                                &pGobiDev->mAutoPM,
2667                                                "GobiUSBNetAutoPMThread" );
2668    if (IS_ERR( pGobiDev->mAutoPM.mpThread ))
2669    {
2670       DBG( "AutoPM thread creation error\n" );
2671       return PTR_ERR( pGobiDev->mAutoPM.mpThread );
2672    }
2673    #endif
2674 #endif /* CONFIG_PM */
2675 
2676    // Allow traffic
2677    GobiClearDownReason( pGobiDev, NET_IFACE_STOPPED );
2678 
2679    // Pass to usbnet_open if defined
2680    if (pGobiDev->mpUSBNetOpen != NULL)
2681    {
2682       status = pGobiDev->mpUSBNetOpen( pNet );
2683 #ifdef CONFIG_PM
2684       // If usbnet_open was successful enable Auto PM
2685       if (status == 0)
2686       {
2687 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
2688          usb_autopm_enable( pGobiDev->mpIntf );
2689 #else
2690          usb_autopm_put_interface( pGobiDev->mpIntf );
2691 #endif
2692       }
2693 #endif /* CONFIG_PM */
2694    }
2695    else
2696    {
2697       DBG( "no USBNetOpen defined\n" );
2698    }
2699 
2700    return status;
2701 }
2702 
2703 /*===========================================================================
2704 METHOD:
2705    GobiUSBNetStop (Public Method)
2706 
2707 DESCRIPTION:
2708    Wrapper to usbnet_stop, correctly handling autosuspend
2709    Stop AutoPM thread (if CONFIG_PM is defined)
2710 
2711 PARAMETERS
2712    pNet     [ I ] - Pointer to net device
2713 
2714 RETURN VALUE:
2715    int - 0 for success
2716          Negative errno for error
2717 ===========================================================================*/
2718 static int GobiUSBNetStop( struct net_device * pNet )
2719 {
2720    struct sGobiUSBNet * pGobiDev;
2721    struct usbnet * pDev = netdev_priv( pNet );
2722 
2723    if (pDev == NULL || pDev->net == NULL)
2724    {
2725       DBG( "failed to get netdevice\n" );
2726       return -ENXIO;
2727    }
2728 
2729    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2730    if (pGobiDev == NULL)
2731    {
2732       DBG( "failed to get QMIDevice\n" );
2733       return -ENXIO;
2734    }
2735 
2736    // Stop traffic
2737    GobiSetDownReason( pGobiDev, NET_IFACE_STOPPED );
2738 
2739 #ifdef CONFIG_PM
2740    #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2741    // Tell traffic thread to exit
2742    pGobiDev->mAutoPM.mbExit = true;
2743    complete( &pGobiDev->mAutoPM.mThreadDoWork );
2744 
2745    // Wait for it to exit
2746    while( pGobiDev->mAutoPM.mpThread != NULL )
2747    {
2748       msleep( 100 );
2749    }
2750    DBG( "thread stopped\n" );
2751    #endif
2752 #endif /* CONFIG_PM */
2753 
2754    // Pass to usbnet_stop, if defined
2755    if (pGobiDev->mpUSBNetStop != NULL)
2756    {
2757       return pGobiDev->mpUSBNetStop( pNet );
2758    }
2759    else
2760    {
2761       return 0;
2762    }
2763 }
2764 
2765 static int GobiNetDriver_check_connect(struct usbnet *pDev) {
2766    int status = 0;
2767    struct sGobiUSBNet * pGobiDev = NULL;
2768 
2769    while (status++ < 10) {
2770       pGobiDev = (sGobiUSBNet *)pDev->data[0];
2771       if (pGobiDev && pGobiDev->mbProbeDone)
2772          break;
2773       msleep(1);
2774    }
2775 
2776    return 0;
2777 }
2778 
2779 /*=========================================================================*/
2780 // Struct driver_info
2781 /*=========================================================================*/
2782 static struct driver_info GobiNetInfo =
2783 {
2784    .description   = "GobiNet Ethernet Device",
2785 #if 1//def CONFIG_ANDROID
2786 #if defined(QUECTEL_WWAN_QMAP) && defined(FLAG_RX_ASSEMBLE)
2787 	.flags         = FLAG_RX_ASSEMBLE, //usb0
2788 #endif
2789 #else
2790 #if defined(QUECTEL_WWAN_QMAP) && defined(FLAG_RX_ASSEMBLE)
2791 	.flags         = FLAG_ETHER | FLAG_RX_ASSEMBLE,
2792 #else
2793 	.flags         = FLAG_ETHER,
2794 #endif
2795 #endif
2796    .bind          = GobiNetDriverBind,
2797    .unbind        = GobiNetDriverUnbind,
2798 #if 1 //def DATA_MODE_RP
2799 #if defined(QUECTEL_WWAN_MULTI_PACKAGES)
2800    .rx_fixup      = GobiNetDriverRxPktsFixup,
2801 #else
2802    .rx_fixup      = GobiNetDriverRxFixup,
2803 #endif
2804    .tx_fixup      = GobiNetDriverTxFixup,
2805 #endif
2806    .check_connect = GobiNetDriver_check_connect,
2807    .data          = (1 << 4),
2808 };
2809 
2810 /*=========================================================================*/
2811 // Qualcomm Gobi 3000 VID/PIDs
2812 /*=========================================================================*/
2813 #define GOBI_FIXED_INTF(vend, prod) \
2814     { \
2815           USB_DEVICE( vend, prod ), \
2816           .driver_info = (unsigned long)&GobiNetInfo, \
2817     }
2818 static const struct usb_device_id QuecGobiVIDPIDTable [] =
2819 {
2820     GOBI_FIXED_INTF( 0x05c6, 0x9003 ), // Quectel UC20
2821     GOBI_FIXED_INTF( 0x05c6, 0x9215 ), // Quectel EC20 (MDM9215)
2822     GOBI_FIXED_INTF( 0x2c7c, 0x0125 ), // Quectel EC20 (MDM9X07)/EC25/EG25
2823     GOBI_FIXED_INTF( 0x2c7c, 0x0121 ), // Quectel EC21
2824     GOBI_FIXED_INTF( 0x2c7c, 0x0306 ), // Quectel EP06
2825     GOBI_FIXED_INTF( 0x2c7c, 0x0435 ), // Quectel AG35
2826     GOBI_FIXED_INTF( 0x2c7c, 0x0296 ), // Quectel BG96
2827     GOBI_FIXED_INTF( 0x2c7c, 0x0191 ), // Quectel EG91
2828     GOBI_FIXED_INTF( 0x2c7c, 0x0195 ), // Quectel EG95
2829     GOBI_FIXED_INTF( 0x2c7c, 0x0512 ), // Quectel EG12/EP12/EM12/EG16/EG18,SDx20
2830     GOBI_FIXED_INTF( 0x2c7c, 0x0620 ), // Quectel EG20,SDx24
2831     GOBI_FIXED_INTF( 0x2c7c, 0x0800 ), // Quectel RG500Q,RM500Q,RM510Q,SDX55
2832    //Terminating entry
2833    { }
2834 };
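/*
 * To support an additional module, add another GOBI_FIXED_INTF() entry
 * before the terminating { } above, e.g. (placeholder PID, illustration
 * only):
 *
 *     GOBI_FIXED_INTF( 0x2c7c, 0xXXXX ), // hypothetical new module
 *
 * The VID/PID pair can be read from lsusb while the module is attached.
 */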
2835 
2836 MODULE_DEVICE_TABLE( usb, QuecGobiVIDPIDTable );
2837 
2838 /*===========================================================================
2839 METHOD:
2840    GobiUSBNetProbe (Public Method)
2841 
2842 DESCRIPTION:
2843    Run usbnet_probe
2844    Setup QMI device
2845 
2846 PARAMETERS
2847    pIntf        [ I ] - Pointer to interface
2848    pVIDPIDs     [ I ] - Pointer to VID/PID table
2849 
2850 RETURN VALUE:
2851    int - 0 for success
2852          Negative errno for error
2853 ===========================================================================*/
2854 static int GobiUSBNetProbe(
2855    struct usb_interface *        pIntf,
2856    const struct usb_device_id *  pVIDPIDs )
2857 {
2858    int status;
2859    struct usbnet * pDev;
2860    sGobiUSBNet * pGobiDev;
2861 #if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,29 ))
2862    struct net_device_ops * pNetDevOps;
2863 #endif
2864 
2865    status = usbnet_probe( pIntf, pVIDPIDs );
2866    if (status < 0)
2867    {
2868       DBG( "usbnet_probe failed %d\n", status );
2869       return status;
2870    }
2871 
2872 #if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,19 ))
2873    pIntf->needs_remote_wakeup = 1;
2874 #endif
2875 
2876 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
2877    pDev = usb_get_intfdata( pIntf );
2878 #else
2879    pDev = (struct usbnet *)pIntf->dev.platform_data;
2880 #endif
2881 
2882    if (pDev == NULL || pDev->net == NULL)
2883    {
2884       DBG( "failed to get netdevice\n" );
2885       usbnet_disconnect( pIntf );
2886       return -ENXIO;
2887    }
2888 
2889    pGobiDev = kzalloc( sizeof( sGobiUSBNet ), GFP_KERNEL );
2890    if (pGobiDev == NULL)
2891    {
2892       DBG( "failed to allocate device buffers" );
2893       usbnet_disconnect( pIntf );
2894       return -ENOMEM;
2895    }
2896 
2897    atomic_set(&pGobiDev->refcount, 1);
2898 
2899    pDev->data[0] = (unsigned long)pGobiDev;
2900 
2901    pGobiDev->mpNetDev = pDev;
2902 
2903    // Clearing endpoint halt is a magic handshake that brings
2904    // the device out of low power (airplane) mode
2905    usb_clear_halt( pGobiDev->mpNetDev->udev, pDev->out );
2906 
2907    // Overload PM related network functions
2908 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2909    pGobiDev->mpUSBNetOpen = pDev->net->open;
2910    pDev->net->open = GobiUSBNetOpen;
2911    pGobiDev->mpUSBNetStop = pDev->net->stop;
2912    pDev->net->stop = GobiUSBNetStop;
2913 #if defined(CONFIG_PM) && (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 ))
2914    pDev->net->hard_start_xmit = GobiUSBNetStartXmit;
2915    pDev->net->tx_timeout = GobiUSBNetTXTimeout;
2916 #else  // Quectel: do not send DHCP request before NDIS connect for UC20
2917     local_usbnet_start_xmit = pDev->net->hard_start_xmit;
2918     pDev->net->hard_start_xmit = GobiUSBNetStartXmit2;
2919 #endif
2920 #else
2921    pNetDevOps = kmalloc( sizeof( struct net_device_ops ), GFP_KERNEL );
2922    if (pNetDevOps == NULL)
2923    {
2924       DBG( "failed to allocate net device ops" );
2925       usbnet_disconnect( pIntf );
2926       return -ENOMEM;
2927    }
2928    memcpy( pNetDevOps, pDev->net->netdev_ops, sizeof( struct net_device_ops ) );
2929 
2930    pGobiDev->mpUSBNetOpen = pNetDevOps->ndo_open;
2931    pNetDevOps->ndo_open = GobiUSBNetOpen;
2932    pGobiDev->mpUSBNetStop = pNetDevOps->ndo_stop;
2933    pNetDevOps->ndo_stop = GobiUSBNetStop;
2934 #if 1 // Quectel: do not send DHCP request before NDIS connect for UC20
2935    pNetDevOps->ndo_start_xmit = GobiUSBNetStartXmit2;
2936 #else
2937    pNetDevOps->ndo_start_xmit = usbnet_start_xmit;
2938 #endif
2939    pNetDevOps->ndo_tx_timeout = usbnet_tx_timeout;
2940 
2941 #if defined(QUECTEL_WWAN_QMAP)
2942    pNetDevOps->ndo_do_ioctl = qmap_ndo_do_ioctl;
2943 #endif
2944 
2945    pDev->net->netdev_ops = pNetDevOps;
2946 #endif
2947 
2948 #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,31 ))
2949    memset( &(pGobiDev->mpNetDev->stats), 0, sizeof( struct net_device_stats ) );
2950 #else
2951    memset( &(pGobiDev->mpNetDev->net->stats), 0, sizeof( struct net_device_stats ) );
2952 #endif
2953 
2954    pGobiDev->mpIntf = pIntf;
2955    memset( &(pGobiDev->mMEID), '0', 14 );
2956 
2957    DBG( "MAC Address:\n" );
2958    PrintHex( &pGobiDev->mpNetDev->net->dev_addr[0], 6 );
2959 
2960    pGobiDev->mbQMIValid = false;
2961    memset( &pGobiDev->mQMIDev, 0, sizeof( sQMIDev ) );
2962    pGobiDev->mQMIDev.mbCdevIsInitialized = false;
2963 
2964    pGobiDev->mQMIDev.mpDevClass = gpClass;
2965 
2966 #ifdef CONFIG_PM
2967    #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2968    init_completion( &pGobiDev->mAutoPM.mThreadDoWork );
2969    #endif
2970 #endif /* CONFIG_PM */
2971    spin_lock_init( &pGobiDev->mQMIDev.mClientMemLock );
2972 
2973    // Default to device down
2974    pGobiDev->mDownReason = 0;
2975 
2976 //#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,11,0 ))
2977    GobiSetDownReason( pGobiDev, NO_NDIS_CONNECTION );
2978    GobiSetDownReason( pGobiDev, NET_IFACE_STOPPED );
2979 //#endif
2980 
2981    // Register QMI
2982    pGobiDev->mbMdm9x07 |= (pDev->udev->descriptor.idVendor == cpu_to_le16(0x2c7c));
2983    pGobiDev->mbMdm9x06 |= (pDev->udev->descriptor.idVendor == cpu_to_le16(0x2c7c) && pDev->udev->descriptor.idProduct == cpu_to_le16(0x0296));
2984    pGobiDev->mbRawIPMode = pGobiDev->mbMdm9x07;
2985    if ( pGobiDev->mbRawIPMode)
2986       pGobiDev->mpNetDev->net->flags |= IFF_NOARP;
2987 #ifdef QUECTEL_BRIDGE_MODE
2988    memcpy(pGobiDev->mHostMAC, pDev->net->dev_addr, 6);
2989    pGobiDev->m_bridge_mode = bridge_mode;
2990 #endif
2991 
2992 #ifdef QUECTEL_REMOVE_TX_ZLP
2993 	{
2994 		struct remove_tx_zlp_config {
2995 			__le32 enable;
2996 		} __packed;
2997 
2998 		struct remove_tx_zlp_config cfg;
2999 		cfg.enable = cpu_to_le32(1);  //1-enable  0-disable
3000 
3001 		usb_control_msg(
3002 			interface_to_usbdev(pIntf),
3003 			usb_sndctrlpipe(interface_to_usbdev(pIntf), 0),
3004 			USB_CDC_SET_REMOVE_TX_ZLP_COMMAND,
3005 			0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
3006 			0,
3007 			pIntf->cur_altsetting->desc.bInterfaceNumber,
3008 			&cfg, sizeof(cfg), 100);
3009 	}
3010 #endif
3011 
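/*
 * Two mutually exclusive control paths follow.  With qcrmcall_mode set the
 * driver skips QMI registration and only asserts DTR through a CDC
 * SET_CONTROL_LINE_STATE request, leaving call setup to AT$QCRMCALL from
 * userspace.  Otherwise RegisterQMIDevice() brings up the qcqmi character
 * device (under the GobiQMI class created at module init) and, when QMAP is
 * enabled, the per-PDN virtual netdevices are registered after the probe
 * succeeds.
 */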
3012 	pGobiDev->m_qcrmcall_mode = qcrmcall_mode;
3013 
3014 	if (pGobiDev->m_qcrmcall_mode) {
3015 		INFO("AT$QCRMCALL MODE!");
3016 
3017 		GobiClearDownReason( pGobiDev, NO_NDIS_CONNECTION );
3018 		usb_control_msg(
3019 			interface_to_usbdev(pIntf),
3020 			usb_sndctrlpipe(interface_to_usbdev(pIntf), 0),
3021 			0x22, //USB_CDC_REQ_SET_CONTROL_LINE_STATE
3022 			0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
3023 			1, //active CDC DTR
3024 			pIntf->cur_altsetting->desc.bInterfaceNumber,
3025 			NULL, 0, 100);
3026 		status = 0;
3027 	}
3028 	else {
3029 #if defined(QUECTEL_WWAN_QMAP)
3030 		if (pGobiDev->mbRawIPMode) {
3031 			unsigned idProduct = le16_to_cpu(pDev->udev->descriptor.idProduct);
3032 
3033 			pGobiDev->qmap_mode = qmap_mode;
3034 			if (pGobiDev->qmap_mode == 0) {
3035 				if (idProduct == 0x0800) {
3036 					pGobiDev->qmap_mode = 1;
3037 				}
3038 			}
3039 
3040 			pGobiDev->qmap_version = 5;
3041 			if (idProduct == 0x0800) {
3042 				pGobiDev->qmap_version = 9;
3043 			}
3044 		}
3045 
3046 		if (pGobiDev->qmap_mode) {
3047 			netif_carrier_off(pDev->net);
3048 		}
3049 
3050 		if (pGobiDev->qmap_mode > 1) {
3051 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,35 )) //ab95bfe01f9872459c8678572ccadbf646badad0
3052 			rtnl_lock();
3053 			netdev_rx_handler_register(pDev->net, rmnet_usb_rx_handler, NULL);
3054 			rtnl_unlock();
3055 #endif
3056 		}
3057 
3058 #if defined(QUECTEL_UL_DATA_AGG)
3059 		if (pGobiDev->qmap_mode) {
3060 			struct ul_agg_ctx *agg_ctx = &pGobiDev->agg_ctx;
3061 
3062 			agg_ctx->ul_data_aggregation_max_datagrams = 1;
3063 			agg_ctx->ul_data_aggregation_max_size = 2048;
3064 			agg_ctx->dl_minimum_padding = 0;
3065 		}
3066 #endif
3067 #endif
3068 		status = RegisterQMIDevice( pGobiDev );
3069 	}
3070 
3071    if (status != 0)
3072    {
3073       // usbnet_disconnect() will call GobiNetDriverUnbind() which will call
3074       // DeregisterQMIDevice() to clean up any partially created QMI device
3075       usbnet_disconnect( pIntf );
3076       return status;
3077    }
3078 
3079 #if defined(QUECTEL_WWAN_QMAP)
3080 	tasklet_init(&pGobiDev->txq, rmnet_usb_tx_wake_queue, (unsigned long)pGobiDev);
3081 
3082 	if (pGobiDev->qmap_mode > 1) {
3083 		unsigned i;
3084 
3085 		for (i = 0; i < pGobiDev->qmap_mode; i++) {
3086 			qmap_register_device(pGobiDev, i);
3087 		}
3088 	} else {
3089 		pGobiDev->mpQmapNetDev[0] = pDev->net;
3090 	}
3091 #endif
3092 
3093    pGobiDev->mbProbeDone = 1;
3094    // Success
3095    return 0;
3096 }
3097 
3098 static void GobiUSBNetDisconnect (struct usb_interface *intf) {
3099 #if defined(QUECTEL_WWAN_QMAP)
3100 	struct usbnet *pDev = usb_get_intfdata(intf);
3101 	sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
3102 	unsigned i;
3103 
3104 	if (pGobiDev->qmap_mode > 1) {
3105 		for (i = 0; i < pGobiDev->qmap_mode; i++) {
3106 			qmap_unregister_device(pGobiDev, i);
3107 		}
3108 
3109 	}
3110 
3111 	tasklet_kill(&pGobiDev->txq);
3112 #endif
3113 
3114 	usbnet_disconnect(intf);
3115 }
3116 
3117 static struct usb_driver GobiNet =
3118 {
3119    .name       = "GobiNet",
3120    .id_table   = QuecGobiVIDPIDTable,
3121    .probe      = GobiUSBNetProbe,
3122    .disconnect = GobiUSBNetDisconnect,
3123 #ifdef CONFIG_PM
3124    .suspend    = GobiNetSuspend,
3125    .resume     = GobiNetResume,
3126 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,27 ))
3127    .reset_resume = GobiNetResetResume,
3128 #endif
3129 #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
3130    .supports_autosuspend = true,
3131 #endif
3132 #endif /* CONFIG_PM */
3133 };
3134 
3135 /*===========================================================================
3136 METHOD:
3137    GobiUSBNetModInit (Public Method)
3138 
3139 DESCRIPTION:
3140    Initialize module
3141    Create device class
3142    Register our usb_driver struct
3143 
3144 RETURN VALUE:
3145    int - 0 for success
3146          Negative errno for error
3147 ===========================================================================*/
3148 static int __init GobiUSBNetModInit( void )
3149 {
3150    gpClass = class_create( THIS_MODULE, "GobiQMI" );
3151    if (IS_ERR( gpClass ) == true)
3152    {
3153       DBG( "error at class_create %ld\n", PTR_ERR( gpClass ) );
3154       return -ENOMEM;
3155    }
3156 
3157    // This will be shown whenever driver is loaded
3158    printk( KERN_INFO "%s: %s\n", DRIVER_DESC, DRIVER_VERSION );
3159 
3160    return usb_register( &GobiNet );
3161 }
3162 module_init( GobiUSBNetModInit );
3163 
3164 /*===========================================================================
3165 METHOD:
3166    GobiUSBNetModExit (Public Method)
3167 
3168 DESCRIPTION:
3169    Deregister module
3170    Destroy device class
3171 
3172 RETURN VALUE:
3173    void
3174 ===========================================================================*/
3175 static void __exit GobiUSBNetModExit( void )
3176 {
3177    usb_deregister( &GobiNet );
3178 
3179    class_destroy( gpClass );
3180 }
3181 module_exit( GobiUSBNetModExit );
3182 
3183 MODULE_VERSION( DRIVER_VERSION );
3184 MODULE_AUTHOR( DRIVER_AUTHOR );
3185 MODULE_DESCRIPTION( DRIVER_DESC );
3186 MODULE_LICENSE("Dual BSD/GPL");
3187 
3188 #ifdef bool
3189 #undef bool
3190 #endif
3191 
3192 module_param_named( debug, quec_debug, int, S_IRUGO | S_IWUSR );
3193 MODULE_PARM_DESC( debug, "Debugging enabled or not" );
3194 
3195 //module_param_named( interruptible, Quecinterruptible, int, S_IRUGO | S_IWUSR );
3196 //MODULE_PARM_DESC( interruptible, "Listen for and return on user interrupt" );
3197 module_param( txQueueLength, int, S_IRUGO | S_IWUSR );
3198 MODULE_PARM_DESC( txQueueLength,
3199                   "Number of IP packets which may be queued up for transmit" );
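/*
 * Example (illustrative): load the module with verbose logging and a deeper
 * transmit queue:
 *
 *   insmod GobiNet.ko debug=1 txQueueLength=100
 */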
3200 
3201