xref: /OK3568_Linux_fs/kernel/drivers/net/usb/GobiNet/GobiUSBNet.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*===========================================================================
FILE:
   GobiUSBNet.c

DESCRIPTION:
   Qualcomm USB Network device for Gobi 3000

FUNCTIONS:
   GobiNetSuspend
   GobiNetResume
   GobiNetDriverBind
   GobiNetDriverUnbind
   GobiUSBNetURBCallback
   GobiUSBNetTXTimeout
   GobiUSBNetAutoPMThread
   GobiUSBNetStartXmit
   GobiUSBNetOpen
   GobiUSBNetStop
   GobiUSBNetProbe
   GobiUSBNetModInit
   GobiUSBNetModExit

Copyright (c) 2011, Code Aurora Forum. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of Code Aurora Forum nor
      the names of its contributors may be used to endorse or promote
      products derived from this software without specific prior written
      permission.


THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
===========================================================================*/

//---------------------------------------------------------------------------
// Include Files
//---------------------------------------------------------------------------

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/version.h>

#include <net/arp.h>
#include <net/ip.h>
#include <net/ipv6.h>

#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0) //8b094cd03b4a3793220d8d8d86a173bfea8c285b
#include <linux/timekeeping.h>
#else
#define timespec64  timespec
#define ktime_get_ts64 ktime_get_ts
#define timespec64_sub timespec_sub
#endif

#include "Structs.h"
#include "QMIDevice.h"
#include "QMI.h"

#ifndef ETH_P_MAP
#define ETH_P_MAP 0xDA1A
#endif

#if (ETH_P_MAP == 0x00F9)
#undef ETH_P_MAP
#define ETH_P_MAP 0xDA1A
#endif

//-----------------------------------------------------------------------------
// Definitions
//-----------------------------------------------------------------------------

// Version Information
// Adding a new module or feature increases the major version; fixing a bug increases the minor version.
#define VERSION_NUMBER "V1.6.2.14"
#define DRIVER_VERSION "Quectel_Linux&Android_GobiNet_Driver_"VERSION_NUMBER
#define DRIVER_AUTHOR "Qualcomm Innovation Center"
#define DRIVER_DESC "GobiNet"
static const char driver_name[] = "GobiNet";

// Debug flag
int quec_debug = 0;

// Allow user interrupts
//int interruptible = 1;

// Number of IP packets which may be queued up for transmit
static int txQueueLength = 100;

// Class should be created during module init, so needs to be global
static struct class * gpClass;

static const unsigned char ec20_mac[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00};
static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00};
static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00};
//static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

// Set up the data call with "AT$QCRMCALL=1,1"
static uint __read_mostly qcrmcall_mode = 0;
module_param( qcrmcall_mode, uint, S_IRUGO | S_IWUSR );

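/* Strip the Ethernet header from an outgoing IPv4/IPv6 frame so that only the
 * raw IP packet is handed to the modem link; frames that are too short or that
 * carry any other ethertype are dropped by returning NULL. */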
static struct sk_buff * ether_to_ip_fixup(struct net_device *dev, struct sk_buff *skb) {
	const struct ethhdr *ehdr;

	skb_reset_mac_header(skb);
	ehdr = eth_hdr(skb);

	if (ehdr->h_proto == htons(ETH_P_IP)) {
		if (unlikely(skb->len <= (sizeof(struct ethhdr) + sizeof(struct iphdr)))) {
			goto drop_skb;
		}
	}
	else if (ehdr->h_proto == htons(ETH_P_IPV6)) {
		if (unlikely(skb->len <= (sizeof(struct ethhdr) + sizeof(struct ipv6hdr)))) {
			goto drop_skb;
		}
	}
	else {
		DBG("%s skb h_proto is %04x\n", dev->name, ntohs(ehdr->h_proto));
		goto drop_skb;
	}

	if (unlikely(skb_pull(skb, ETH_HLEN)))
		return skb;

drop_skb:
	return NULL;
}

//#define QUECTEL_REMOVE_TX_ZLP
#define USB_CDC_SET_REMOVE_TX_ZLP_COMMAND 0x5D

//#define QUECTEL_WWAN_MULTI_PACKAGES

#ifdef QUECTEL_WWAN_MULTI_PACKAGES
static uint __read_mostly rx_packets = 10;
module_param( rx_packets, uint, S_IRUGO | S_IWUSR );

#define USB_CDC_SET_MULTI_PACKAGE_COMMAND (0x5C)
#define QUEC_NET_MSG_SPEC		(0x80)
#define QUEC_NET_MSG_ID_IP_DATA		(0x00)

struct multi_package_config {
	__le32 enable;
	__le32 package_max_len;
	__le32 package_max_count_in_queue;
	__le32 timeout;
} __packed;

struct quec_net_package_header {
	unsigned char msg_spec;
	unsigned char msg_id;
	unsigned short payload_len;
	unsigned char reserve[16];
} __packed;
#endif

#ifdef QUECTEL_WWAN_QMAP
/*
    Quectel_WCDMA&LTE_Linux_USB_Driver_User_Guide_V1.9.pdf
    5.6.	Test QMAP on GobiNet or QMI WWAN
    0 - no QMAP
    1 - QMAP (Aggregation protocol)
    X - QMAP (Multiplexing and Aggregation protocol)
*/
static uint __read_mostly qmap_mode = 0;
module_param( qmap_mode, uint, S_IRUGO | S_IWUSR );
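/* Usage sketch (assumes the module is built and loaded as GobiNet.ko):
 *     insmod GobiNet.ko qmap_mode=4
 * selects QMAP multiplexing with four sessions; qmap_register_device() below
 * then creates one virtual interface per session, named
 * "<real_dev>.1" .. "<real_dev>.4". */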

struct qmap_hdr {
    u8 cd_rsvd_pad;
    u8 mux_id;
    u16 pkt_len;
} __packed;

enum rmnet_map_v5_header_type {
	RMNET_MAP_HEADER_TYPE_UNKNOWN,
	RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
	RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
	RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};

/* Main QMAP header */
struct rmnet_map_header {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  pad_len:6;
	u8  next_hdr:1;
	u8  cd_bit:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	u8  cd_bit:1;
	u8  next_hdr:1;
	u8  pad_len:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	u8  mux_id;
	__be16 pkt_len;
}  __aligned(1);

/* QMAP v5 headers */
struct rmnet_map_v5_csum_header {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  next_hdr:1;
	u8  header_type:7;
	u8  hw_reserved:7;
	u8  csum_valid_required:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	u8  header_type:7;
	u8  next_hdr:1;
	u8  csum_valid_required:1;
	u8  hw_reserved:7;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be16 reserved;
} __aligned(1);
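/* On the wire each uplink QMAP frame is the 4-byte rmnet_map_header above
 * (pad_len/next_hdr/cd_bit, mux_id, big-endian pkt_len), optionally followed by
 * the 4-byte rmnet_map_v5_csum_header when next_hdr is set, then the padded IP
 * payload. add_qhdr() and add_qhdr_v5() below build these two layouts. */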

struct qmap_priv {
	struct net_device *real_dev;
	struct net_device *self_dev;
	uint qmap_version;
	uint offset_id;
	uint mux_id;
	uint link_state;

#if defined(QUECTEL_UL_DATA_AGG)
	/* QMIWDS_ADMIN_SET_DATA_FORMAT_RESP TLV_0x17 and TLV_0x18 */
	uint ul_data_aggregation_max_datagrams; //UplinkDataAggregationMaxDatagramsTlv
	uint ul_data_aggregation_max_size; //UplinkDataAggregationMaxSizeTlv
	uint dl_minimum_padding; //0x1A

	spinlock_t agg_lock;
	struct sk_buff *agg_skb;
	unsigned agg_count;
	struct timespec64 agg_time;
	struct hrtimer agg_hrtimer;
	struct work_struct agg_wq;
#endif

#ifdef QUECTEL_BRIDGE_MODE
	int m_bridge_mode;
	uint m_bridge_ipv4;
	unsigned char mHostMAC[6];
#endif
};

#ifdef QUECTEL_BRIDGE_MODE
static int is_qmap_netdev(const struct net_device *netdev);
#endif

#endif

#ifdef QUECTEL_BRIDGE_MODE
static int __read_mostly bridge_mode = 0/*|BIT(1)*/;
module_param( bridge_mode, int, S_IRUGO | S_IWUSR );

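/* In bridge mode the attached host uses the modem's IP address, so ARP requests
 * from the host are answered locally: when the requested target shares the first
 * two octets and the upper bits of the third octet with bridge_ipv4 but is not
 * bridge_ipv4 itself, a synthetic reply is injected with ec20_mac as the
 * resolved hardware address. */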
static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4) {
    struct arphdr *parp;
    u8 *arpptr, *sha;
    u8  sip[4], tip[4], ipv4[4];
    struct sk_buff *reply = NULL;

    ipv4[0]  = (bridge_ipv4 >> 24) & 0xFF;
    ipv4[1]  = (bridge_ipv4 >> 16) & 0xFF;
    ipv4[2]  = (bridge_ipv4 >> 8) & 0xFF;
    ipv4[3]  = (bridge_ipv4 >> 0) & 0xFF;

    parp = arp_hdr(skb);

    if (parp->ar_hrd == htons(ARPHRD_ETHER)  && parp->ar_pro == htons(ETH_P_IP)
        && parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) {
        arpptr = (u8 *)parp + sizeof(struct arphdr);
        sha = arpptr;
        arpptr += net->addr_len;	/* sha */
        memcpy(sip, arpptr, sizeof(sip));
        arpptr += sizeof(sip);
        arpptr += net->addr_len;	/* tha */
        memcpy(tip, arpptr, sizeof(tip));

        pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, ipv4=%d.%d.%d.%d\n", netdev_name(net),
            sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]);
	//wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255
        if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3])
            reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, ec20_mac, sha);

        if (reply) {
            skb_reset_mac_header(reply);
            __skb_pull(reply, skb_network_offset(reply));
            reply->ip_summed = CHECKSUM_UNNECESSARY;
            reply->pkt_type = PACKET_HOST;

            netif_rx_ni(reply);
        }
        return 1;
    }

    return 0;
}

static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4, unsigned char *bridge_mac) {
	struct ethhdr *ehdr;
	const struct iphdr *iph;

	skb_reset_mac_header(skb);
	ehdr = eth_hdr(skb);

	if (ehdr->h_proto == htons(ETH_P_ARP)) {
		if (bridge_ipv4)
			bridge_arp_reply(net, skb, bridge_ipv4);
		return NULL;
	}

	iph = ip_hdr(skb);
	//DBG("iphdr: ");
	//PrintHex((void *)iph, sizeof(struct iphdr));

// 1	0.000000000	0.0.0.0	255.255.255.255	DHCP	362	DHCP Request  - Transaction ID 0xe7643ad7
	if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr == 0x00000000 && iph->daddr == 0xFFFFFFFF) {
		//if (udp_hdr(skb)->dest == htons(67)) //DHCP Request
		{
			memcpy(bridge_mac, ehdr->h_source, ETH_ALEN);
			pr_info("%s PC Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net),
				bridge_mac[0], bridge_mac[1], bridge_mac[2], bridge_mac[3], bridge_mac[4], bridge_mac[5]);
		}
	}

	if (memcmp(ehdr->h_source, bridge_mac, ETH_ALEN)) {
		return NULL;
	}

	return skb;
}

static void bridge_mode_rx_fixup(sGobiUSBNet *pQmapDev, struct net_device *net, struct sk_buff *skb) {
	uint bridge_mode = 0;
	unsigned char *bridge_mac;

	if (pQmapDev->qmap_mode > 1) {
		struct qmap_priv *priv = netdev_priv(net);
		bridge_mode = priv->m_bridge_mode;
		bridge_mac = priv->mHostMAC;
	}
	else {
		bridge_mode = pQmapDev->m_bridge_mode;
		bridge_mac = pQmapDev->mHostMAC;
	}

	if (bridge_mode)
		memcpy(eth_hdr(skb)->h_dest, bridge_mac, ETH_ALEN);
	else
		memcpy(eth_hdr(skb)->h_dest, net->dev_addr, ETH_ALEN);
}

static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) {
    struct net_device *pNet = to_net_dev(dev);
    uint bridge_mode = 0;

	if (is_qmap_netdev(pNet)) {
		struct qmap_priv *priv = netdev_priv(pNet);
		bridge_mode = priv->m_bridge_mode;
	}
	else {
        struct usbnet * pDev = netdev_priv( pNet );
		sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
        bridge_mode = pGobiDev->m_bridge_mode;
	}

    return snprintf(buf, PAGE_SIZE, "%d\n", bridge_mode);
}

static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
	struct net_device *pNet = to_net_dev(dev);
	uint old_mode = 0;
	uint bridge_mode = simple_strtoul(buf, NULL, 0);

	if (pNet->type != ARPHRD_ETHER) {
		return count;
	}

	if (is_qmap_netdev(pNet)) {
		struct qmap_priv *priv = netdev_priv(pNet);

		old_mode = priv->m_bridge_mode;
		priv->m_bridge_mode = bridge_mode;
	}
	else {
		struct usbnet * pDev = netdev_priv( pNet );
		sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];

		old_mode = pGobiDev->m_bridge_mode;
		pGobiDev->m_bridge_mode = bridge_mode;
	}

	if (old_mode != bridge_mode)
		dev_info(dev, "bridge_mode change to 0x%x\n", bridge_mode);

	return count;
}

static ssize_t bridge_ipv4_show(struct device *dev, struct device_attribute *attr, char *buf) {
    struct net_device *pNet = to_net_dev(dev);
    unsigned int bridge_ipv4 = 0;
    unsigned char ipv4[4];

	if (is_qmap_netdev(pNet)) {
		struct qmap_priv *priv = netdev_priv(pNet);
		bridge_ipv4 = priv->m_bridge_ipv4;
	}
	else {
		struct usbnet * pDev = netdev_priv( pNet );
        sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
        bridge_ipv4 = pGobiDev->m_bridge_ipv4;
	}

	ipv4[0]  = (bridge_ipv4 >> 24) & 0xFF;
	ipv4[1]  = (bridge_ipv4 >> 16) & 0xFF;
	ipv4[2]  = (bridge_ipv4 >> 8) & 0xFF;
	ipv4[3]  = (bridge_ipv4 >> 0) & 0xFF;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",  ipv4[0], ipv4[1], ipv4[2], ipv4[3]);
}

static ssize_t bridge_ipv4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
    struct net_device *pNet = to_net_dev(dev);

	if (is_qmap_netdev(pNet)) {
		struct qmap_priv *priv = netdev_priv(pNet);
		priv->m_bridge_ipv4 = simple_strtoul(buf, NULL, 16);
	}
	else {
        struct usbnet * pDev = netdev_priv( pNet );
        sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
		pGobiDev->m_bridge_ipv4 = simple_strtoul(buf, NULL, 16);
	}

    return count;
}

static DEVICE_ATTR(bridge_mode, S_IWUSR | S_IRUGO, bridge_mode_show, bridge_mode_store);
static DEVICE_ATTR(bridge_ipv4, S_IWUSR | S_IRUGO, bridge_ipv4_show, bridge_ipv4_store);
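/* Usage sketch (interface name is illustrative): these attributes appear under
 * /sys/class/net/<ifname>/, e.g.
 *     echo 1          > /sys/class/net/usb0/bridge_mode
 *     echo 0xC0A80164 > /sys/class/net/usb0/bridge_ipv4    # 192.168.1.100
 * bridge_ipv4_store() above parses the address as a hexadecimal integer. */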

static struct attribute *qmi_qmap_sysfs_attrs[] = {
	&dev_attr_bridge_mode.attr,
	&dev_attr_bridge_ipv4.attr,
	NULL,
};

static struct attribute_group qmi_qmap_sysfs_attr_group = {
	.attrs = qmi_qmap_sysfs_attrs,
};
#endif

#ifdef QUECTEL_WWAN_QMAP
static sGobiUSBNet * net_to_qmap(struct net_device *dev) {
	struct usbnet *usbnet = netdev_priv(dev);
	sGobiUSBNet * pGobiDev = (sGobiUSBNet *)usbnet->data[0];

	return pGobiDev;
}

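/* Prepend the 4-byte QMAP (MAP v1) header used when qmap_version == 5: the IP
 * payload is first padded to a multiple of 4 bytes, then the pad length, mux_id
 * and big-endian packet length are filled in. */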
static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) {
	struct qmap_hdr *qhdr;
	int pad = 0;

	pad = skb->len%4;
	if (pad) {
		pad = 4 - pad;
		if (skb_tailroom(skb) < pad) {
			printk("skb_tailroom small!\n");
			pad = 0;
		}
		if (pad)
			__skb_put(skb, pad);
	}

	qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr));
	qhdr->cd_rsvd_pad = pad;
	qhdr->mux_id = mux_id;
	qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr));

	return skb;
}

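/* Variant for qmap_version == 9: a MAP header with next_hdr set is followed by an
 * rmnet_map_v5_csum_header. Checksum offload is currently compiled out (the
 * CHECKSUM_PARTIAL branch is under "#if 0"), so csum_valid_required stays 0. */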
static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) {
	struct rmnet_map_header *map_header;
	struct rmnet_map_v5_csum_header *ul_header;
	u32 padding, map_datalen;

	map_datalen = skb->len;
	padding = map_datalen%4;
	if (padding) {
		padding = 4 - padding;
		if (skb_tailroom(skb) < padding) {
			printk("skb_tailroom small!\n");
			padding = 0;
		}
		if (padding)
			__skb_put(skb, padding);
	}

	map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)));
	map_header->cd_bit = 0;
	map_header->next_hdr = 1;
	map_header->pad_len = padding;
	map_header->mux_id = mux_id;
	map_header->pkt_len = htons(map_datalen + padding);

	ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
#if 0 //TODO
		skb->ip_summed = CHECKSUM_NONE;
		/* Ask for checksum offloading */
		ul_header->csum_valid_required = 1;
#endif
	}

	return skb;
}

static void rmnet_usb_tx_wake_queue(unsigned long data) {
	sGobiUSBNet *pQmapDev = (void *)data;
	int i;

	for (i = 0; i < pQmapDev->qmap_mode; i++) {
		struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
		if (qmap_net) {
			if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(pQmapDev->mpNetDev->net)) {
				netif_wake_queue(qmap_net);
			}
		}
	}
}

static void rmnet_usb_tx_skb_destructor(struct sk_buff *skb) {
	sGobiUSBNet *pQmapDev = net_to_qmap(skb->dev);
	int i;

	for (i = 0; i < pQmapDev->qmap_mode; i++) {
		struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];

		if (qmap_net) {
			if (netif_queue_stopped(qmap_net)) {
				tasklet_schedule(&pQmapDev->txq);
				break;
			}
		}
	}
}

static void rmnet_vnd_update_rx_stats(struct net_device *net,
			unsigned rx_packets, unsigned rx_bytes) {
	net->stats.rx_packets += rx_packets;
	net->stats.rx_bytes += rx_bytes;
}

static void rmnet_vnd_update_tx_stats(struct net_device *net,
			unsigned tx_packets, unsigned tx_bytes) {
	net->stats.tx_packets += tx_packets;
	net->stats.tx_bytes += tx_bytes;
}

#if defined(QUECTEL_UL_DATA_AGG)
static long agg_time_limit __read_mostly = 1000000L; //reducing this time can improve throughput, but increases the number of USB interrupts
module_param(agg_time_limit, long, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf");

static long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");
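/* Tuning sketch (assumes the module is named GobiNet): both limits are writable
 * at runtime, e.g.
 *     echo 500000 > /sys/module/GobiNet/parameters/agg_time_limit
 * halves the default 1 ms aggregation window for lower latency at the cost of
 * more USB interrupts. */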

static int rmnet_usb_tx_agg_skip(struct sk_buff *skb, int offset)
{
	u8 *packet_start = skb->data + offset;
	int ready2send = 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip4h = (struct iphdr *)(packet_start);

		if (ip4h->protocol == IPPROTO_TCP) {
			const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct iphdr));
			if (th->psh) {
				ready2send = 1;
			}
		}
		else if (ip4h->protocol == IPPROTO_ICMP)
			ready2send = 1;

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);

		if (ip6h->nexthdr == NEXTHDR_TCP) {
			const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct ipv6hdr));
			if (th->psh) {
				ready2send = 1;
			}
		} else if (ip6h->nexthdr == NEXTHDR_ICMP) {
			ready2send = 1;
		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag;

			frag = (struct frag_hdr *)(packet_start
						   + sizeof(struct ipv6hdr));
			if (frag->nexthdr == IPPROTO_ICMPV6)
				ready2send = 1;
		}
	}

	return ready2send;
}

static void rmnet_usb_tx_agg_work(struct work_struct *work)
{
	struct qmap_priv *priv =
			container_of(work, struct qmap_priv, agg_wq);
	struct sk_buff *skb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&priv->agg_lock, flags);
	if (likely(priv->agg_skb)) {
		skb = priv->agg_skb;
		priv->agg_skb = NULL;
		priv->agg_count = 0;
		skb->protocol = htons(ETH_P_MAP);
		skb->dev = priv->real_dev;
		ktime_get_ts64(&priv->agg_time);
	}
	spin_unlock_irqrestore(&priv->agg_lock, flags);

	if (skb) {
		int err = dev_queue_xmit(skb);
		if (err != NET_XMIT_SUCCESS) {
			priv->self_dev->stats.tx_errors++;
		}
	}
}

static enum hrtimer_restart  rmnet_usb_tx_agg_timer_cb(struct hrtimer *timer)
{
	struct qmap_priv *priv =
			container_of(timer, struct qmap_priv, agg_hrtimer);

	schedule_work(&priv->agg_wq);
	return HRTIMER_NORESTART;
}

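/* Uplink aggregation: MAP frames are copied into one large agg_skb until the
 * byte or datagram limits negotiated via QMI are reached, the buffer has been
 * held longer than agg_time_limit, or rmnet_usb_tx_agg_skip() spots traffic that
 * should not wait (TCP PSH, ICMP/ICMPv6). The aggregate is then queued on the
 * real USB netdev; a 2 ms hrtimer (rmnet_usb_tx_agg_timer_cb) flushes whatever
 * is left behind. */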
static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) {
	int ready2send = 0;
	int xmit_more = 0;
	struct timespec64 diff, now;
	struct sk_buff *agg_skb = NULL;
	unsigned long flags;
	int err;
	struct net_device *pNet = priv->self_dev;

#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0) //6b16f9ee89b8d5709f24bc3ac89ae8b5452c0d7c
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0)
	xmit_more = skb->xmit_more;
#endif
#else
	xmit_more = netdev_xmit_more();
#endif

	rmnet_vnd_update_tx_stats(pNet, 1, skb->len);

	if (priv->ul_data_aggregation_max_datagrams == 1) {
		skb->protocol = htons(ETH_P_MAP);
		skb->dev = priv->real_dev;
		if (!skb->destructor)
			skb->destructor = rmnet_usb_tx_skb_destructor;
		err = dev_queue_xmit(skb);
		if (err != NET_XMIT_SUCCESS)
			pNet->stats.tx_errors++;
		return NET_XMIT_SUCCESS;
	}

new_packet:
	spin_lock_irqsave(&priv->agg_lock, flags);
	agg_skb = NULL;
	ready2send = 0;
	ktime_get_ts64(&now);
	diff = timespec64_sub(now, priv->agg_time);

	if (priv->agg_skb) {
		if ((priv->agg_skb->len + skb->len) < priv->ul_data_aggregation_max_size) {
			memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
			priv->agg_count++;

			if (diff.tv_sec > 0 || diff.tv_nsec > agg_time_limit) {
				ready2send = 1;
			}
			else if (priv->agg_count == priv->ul_data_aggregation_max_datagrams) {
				ready2send = 1;
			}
			else if (xmit_more == 0) {
				struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
				size_t offset = sizeof(struct rmnet_map_header);
				if (map_header->next_hdr)
					offset += sizeof(struct rmnet_map_v5_csum_header);

				ready2send = rmnet_usb_tx_agg_skip(skb, offset);
			}

			dev_kfree_skb_any(skb);
			skb = NULL;
		}
		else {
			ready2send = 1;
		}

		if (ready2send) {
			agg_skb = priv->agg_skb;
			priv->agg_skb = NULL;
			priv->agg_count = 0;
		}
	}
	else if (skb) {
		if (diff.tv_sec > 0 || diff.tv_nsec > agg_bypass_time) {
			ready2send = 1;
		}
		else if (xmit_more == 0) {
			struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
			size_t offset = sizeof(struct rmnet_map_header);
			if (map_header->next_hdr)
				offset += sizeof(struct rmnet_map_v5_csum_header);

			ready2send = rmnet_usb_tx_agg_skip(skb, offset);
		}

		if (ready2send == 0) {
			priv->agg_skb = alloc_skb(priv->ul_data_aggregation_max_size, GFP_ATOMIC);
			if (priv->agg_skb) {
				memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
				priv->agg_count++;
				dev_kfree_skb_any(skb);
				skb = NULL;
			}
			else {
				ready2send = 1;
			}
		}

		if (ready2send) {
			agg_skb = skb;
			skb = NULL;
		}
	}

	if (ready2send) {
		priv->agg_time = now;
	}
	spin_unlock_irqrestore(&priv->agg_lock, flags);

	if (agg_skb) {
		agg_skb->protocol = htons(ETH_P_MAP);
		agg_skb->dev = priv->real_dev;
		if (!agg_skb->destructor)
			agg_skb->destructor = rmnet_usb_tx_skb_destructor;
		err = dev_queue_xmit(agg_skb);
		if (err != NET_XMIT_SUCCESS) {
			pNet->stats.tx_errors++;
		}
	}

	if (skb) {
		goto new_packet;
	}

	if (priv->agg_skb) {
		if (!hrtimer_is_queued(&priv->agg_hrtimer))
			hrtimer_start(&priv->agg_hrtimer, ns_to_ktime(NSEC_PER_MSEC * 2), HRTIMER_MODE_REL);
	}

	return NET_XMIT_SUCCESS;
}
#endif

static int qmap_open(struct net_device *dev)
{
	struct qmap_priv *priv = netdev_priv(dev);
	sGobiUSBNet * pGobiDev = net_to_qmap(priv->real_dev);

	if (!(priv->real_dev->flags & IFF_UP))
		return -ENETDOWN;

	if (!pGobiDev->mbQMIReady)
		return -ENETDOWN;

#if defined(QUECTEL_UL_DATA_AGG)
	if (priv->ul_data_aggregation_max_datagrams == 1 && pGobiDev->agg_ctx.ul_data_aggregation_max_datagrams > 1) {
		priv->ul_data_aggregation_max_datagrams = pGobiDev->agg_ctx.ul_data_aggregation_max_datagrams;
		priv->ul_data_aggregation_max_size = pGobiDev->agg_ctx.ul_data_aggregation_max_size;
		priv->dl_minimum_padding = pGobiDev->agg_ctx.dl_minimum_padding;
	}
#endif

	if (netif_carrier_ok(priv->real_dev) && priv->link_state)
		netif_carrier_on(dev);

	if (netif_carrier_ok(dev)) {
		if (netif_queue_stopped(dev) && !netif_queue_stopped(priv->real_dev))
			netif_wake_queue(dev);
	}

	return 0;
}

static int qmap_stop(struct net_device *pNet)
{
	netif_carrier_off(pNet);
	return 0;
}

static int qmap_start_xmit(struct sk_buff *skb, struct net_device *pNet)
{
	int err;
	struct qmap_priv *priv = netdev_priv(pNet);

	if (netif_queue_stopped(priv->real_dev)) {
		//printk(KERN_DEBUG "s\n");
		netif_stop_queue(pNet);
		return NETDEV_TX_BUSY;
	}

	if (pNet->type == ARPHRD_ETHER) {
#ifdef QUECTEL_BRIDGE_MODE
		if (priv->m_bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->m_bridge_ipv4, priv->mHostMAC) == NULL) {
			dev_kfree_skb_any (skb);
			return NETDEV_TX_OK;
		}
#endif

		if (ether_to_ip_fixup(pNet, skb) == NULL) {
			dev_kfree_skb_any (skb);
			return NETDEV_TX_OK;
		}
	}

	if (priv->qmap_version == 5) {
		add_qhdr(skb, priv->mux_id);
	}
	else if (priv->qmap_version == 9) {
		add_qhdr_v5(skb, priv->mux_id);
	}
	else {
		dev_kfree_skb_any (skb);
		return NETDEV_TX_OK;
	}

#if defined(QUECTEL_UL_DATA_AGG)
	err = rmnet_usb_tx_agg(skb, priv);
#else
	skb->protocol = htons(ETH_P_MAP);
	skb->dev = priv->real_dev;
	if (!skb->destructor)
		skb->destructor = rmnet_usb_tx_skb_destructor;
	err = dev_queue_xmit(skb);
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 ))
	if (err == NET_XMIT_SUCCESS) {
		rmnet_vnd_update_tx_stats(pNet, 1, skb->len);
	} else {
		pNet->stats.tx_errors++;
	}
#endif
#endif

	return err;
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
#else
static const struct net_device_ops qmap_netdev_ops = {
	.ndo_open       = qmap_open,
	.ndo_stop       = qmap_stop,
	.ndo_start_xmit = qmap_start_xmit,
};
#endif

#ifdef QUECTEL_BRIDGE_MODE
static int is_qmap_netdev(const struct net_device *netdev) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
    return netdev->open == qmap_open;
#else
    return netdev->netdev_ops == &qmap_netdev_ops;
#endif
}
#endif

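/* Create and register one virtual QMAP interface: an ether-style netdev named
 * "<real_dev>.<n>" whose qmap_priv carries the mux_id, QMAP version and, when
 * QUECTEL_UL_DATA_AGG is enabled, the per-interface aggregation state. */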
static int qmap_register_device(sGobiUSBNet * pDev, u8 offset_id)
{
	struct net_device *real_dev = pDev->mpNetDev->net;
	struct net_device *qmap_net;
	struct qmap_priv *priv;
	int err;

	qmap_net = alloc_etherdev(sizeof(*priv));
	if (!qmap_net)
		return -ENOBUFS;

	SET_NETDEV_DEV(qmap_net, &real_dev->dev);
	priv = netdev_priv(qmap_net);
	priv->offset_id = offset_id;
	priv->mux_id = QUECTEL_QMAP_MUX_ID + offset_id;
	priv->qmap_version = pDev->qmap_version;
	priv->real_dev = real_dev;
	priv->self_dev = qmap_net;

#if defined(QUECTEL_UL_DATA_AGG)
	priv->ul_data_aggregation_max_datagrams = 1;
	priv->ul_data_aggregation_max_size = 2048;
	priv->dl_minimum_padding = 0;
	priv->agg_skb = NULL;
	priv->agg_count = 0;
	hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->agg_hrtimer.function = rmnet_usb_tx_agg_timer_cb;
	INIT_WORK(&priv->agg_wq, rmnet_usb_tx_agg_work);
	ktime_get_ts64(&priv->agg_time);
	spin_lock_init(&priv->agg_lock);
#endif

    sprintf(qmap_net->name, "%s.%d", real_dev->name, offset_id + 1);
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
    qmap_net->open = qmap_open;
    qmap_net->stop = qmap_stop;
    qmap_net->hard_start_xmit = qmap_start_xmit;
#else
    qmap_net->netdev_ops = &qmap_netdev_ops;
#endif
    memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN);

#ifdef QUECTEL_BRIDGE_MODE
	priv->m_bridge_mode = !!(pDev->m_bridge_mode & BIT(offset_id));
	qmap_net->sysfs_groups[0] = &qmi_qmap_sysfs_attr_group;
#endif

    err = register_netdev(qmap_net);
    if (err < 0) {
        INFO("register_netdev(%s), err=%d\n", qmap_net->name, err);
        goto out_free_newdev;
    }
    netif_device_attach (qmap_net);

    pDev->mpQmapNetDev[offset_id] = qmap_net;
    qmap_net->flags |= IFF_NOARP;
    qmap_net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

    INFO("%s\n", qmap_net->name);

    return 0;

out_free_newdev:
    free_netdev(qmap_net);
    return err;
}

static void qmap_unregister_device(sGobiUSBNet * pDev, u8 offset_id) {
	struct net_device *qmap_net;
#if defined(QUECTEL_UL_DATA_AGG)
	struct qmap_priv *priv;
	unsigned long flags;
#endif

	qmap_net = pDev->mpQmapNetDev[offset_id];
	if (qmap_net == NULL)
		return;

	netif_carrier_off(qmap_net);
	netif_stop_queue(qmap_net);

#if defined(QUECTEL_UL_DATA_AGG)
	priv = netdev_priv(qmap_net);
	hrtimer_cancel(&priv->agg_hrtimer);
	cancel_work_sync(&priv->agg_wq);
	spin_lock_irqsave(&priv->agg_lock, flags);
	if (priv->agg_skb) {
		kfree_skb(priv->agg_skb);
	}
	spin_unlock_irqrestore(&priv->agg_lock, flags);
#endif

	unregister_netdev(qmap_net);
	free_netdev(qmap_net);
}

static ssize_t qmap_mode_show(struct device *dev, struct device_attribute *attr, char *buf) {
    struct net_device *pNet = to_net_dev(dev);
    struct usbnet * pDev = netdev_priv( pNet );
    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];

    return snprintf(buf, PAGE_SIZE, "%d\n", pGobiDev->qmap_mode);
}

static DEVICE_ATTR(qmap_mode, S_IRUGO, qmap_mode_show, NULL);

static ssize_t qmap_size_show(struct device *dev, struct device_attribute *attr, char *buf) {
    struct net_device *pNet = to_net_dev(dev);
    struct usbnet * pDev = netdev_priv( pNet );
    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];

    return snprintf(buf, PAGE_SIZE, "%d\n", pGobiDev->qmap_size);
}

static DEVICE_ATTR(qmap_size, S_IRUGO, qmap_size_show, NULL);

static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) {
	sGobiUSBNet *pQmapDev = net_to_qmap(to_net_dev(dev));

	return snprintf(buf, PAGE_SIZE, "0x%x\n",  pQmapDev->link_state);
}

static ssize_t link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
	struct net_device *netdev = to_net_dev(dev);
	sGobiUSBNet *pQmapDev = net_to_qmap(to_net_dev(dev));
	unsigned qmap_mode = pQmapDev->qmap_mode;
	unsigned link_state = 0;
	unsigned old_link = pQmapDev->link_state;
	uint offset_id = 0;

	link_state = simple_strtoul(buf, NULL, 0);
	if (qmap_mode == 1) {
		pQmapDev->link_state = !!link_state;
	}
	else if (qmap_mode > 1) {
		offset_id = ((link_state&0x7F) - 1);

		if (offset_id >= qmap_mode) {
			dev_info(dev, "%s offset_id is %d. but qmap_mode is %d\n", __func__, offset_id, pQmapDev->qmap_mode);
			return count;
		}

		if (link_state&0x80)
			pQmapDev->link_state &= ~(1 << offset_id);
		else
			pQmapDev->link_state |= (1 << offset_id);
	}

	if (old_link != pQmapDev->link_state) {
		struct net_device *qmap_net = pQmapDev->mpQmapNetDev[offset_id];

		if (pQmapDev->link_state) {
			netif_carrier_on(netdev);
		} else {
			netif_carrier_off(netdev);
		}

		if (qmap_net && qmap_net != netdev) {
			struct qmap_priv *priv = netdev_priv(qmap_net);

			priv->link_state = !!(pQmapDev->link_state & (1 << offset_id));
			if (priv->link_state) {
				netif_carrier_on(qmap_net);
				if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(priv->real_dev))
					netif_wake_queue(qmap_net);
			}
			else
				netif_carrier_off(qmap_net);
		}
	}

	if (old_link != pQmapDev->link_state)
		dev_info(dev, "link_state 0x%x -> 0x%x\n", old_link, pQmapDev->link_state);

	return count;
}

static DEVICE_ATTR(link_state, S_IWUSR | S_IRUGO, link_state_show, link_state_store);
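/* Usage sketch (interface name is illustrative): with qmap_mode > 1 the session
 * number is carried in the low 7 bits and bit 7 requests teardown, e.g.
 *     echo 1    > /sys/class/net/usb0/link_state    # bring QMAP session 1 up
 *     echo 0x81 > /sys/class/net/usb0/link_state    # take QMAP session 1 down
 * With qmap_mode == 1 the value is a plain up (non-zero) / down (0) flag. */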
1085*4882a593Smuzhiyun #endif
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun static struct attribute *gobinet_sysfs_attrs[] = {
1088*4882a593Smuzhiyun #ifdef QUECTEL_BRIDGE_MODE
1089*4882a593Smuzhiyun 	&dev_attr_bridge_mode.attr,
1090*4882a593Smuzhiyun 	&dev_attr_bridge_ipv4.attr,
1091*4882a593Smuzhiyun #endif
1092*4882a593Smuzhiyun #ifdef QUECTEL_WWAN_QMAP
1093*4882a593Smuzhiyun 	&dev_attr_qmap_mode.attr,
1094*4882a593Smuzhiyun 	&dev_attr_qmap_size.attr,
1095*4882a593Smuzhiyun 	&dev_attr_link_state.attr,
1096*4882a593Smuzhiyun #endif
1097*4882a593Smuzhiyun 	NULL,
1098*4882a593Smuzhiyun };
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun static struct attribute_group gobinet_sysfs_attr_group = {
1101*4882a593Smuzhiyun 	.attrs = gobinet_sysfs_attrs,
1102*4882a593Smuzhiyun };
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP)
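/*
 * Snapshot of the QMAP configuration handed to userspace through the 0x89F3
 * private ioctl below; the layout is presumably shared with Quectel's
 * userspace tools, so it should be treated as a fixed binary interface
 * (an assumption based on how it is copied out with copy_to_user()).
 */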
1105*4882a593Smuzhiyun typedef struct {
1106*4882a593Smuzhiyun     unsigned int size;
1107*4882a593Smuzhiyun     unsigned int rx_urb_size;
1108*4882a593Smuzhiyun     unsigned int ep_type;
1109*4882a593Smuzhiyun     unsigned int iface_id;
1110*4882a593Smuzhiyun     unsigned int qmap_mode;
1111*4882a593Smuzhiyun     unsigned int qmap_version;
1112*4882a593Smuzhiyun     unsigned int dl_minimum_padding;
1113*4882a593Smuzhiyun     char ifname[8][16];
1114*4882a593Smuzhiyun     unsigned char mux_id[8];
1115*4882a593Smuzhiyun } RMNET_INFO;
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun static void rmnet_info_set(struct sGobiUSBNet *pQmapDev, RMNET_INFO *rmnet_info)
1118*4882a593Smuzhiyun {
1119*4882a593Smuzhiyun 	int i;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	memset(rmnet_info, 0, sizeof(*rmnet_info));
1122*4882a593Smuzhiyun 	rmnet_info->size = sizeof(RMNET_INFO);
1123*4882a593Smuzhiyun 	rmnet_info->rx_urb_size = pQmapDev->qmap_size;
1124*4882a593Smuzhiyun 	rmnet_info->ep_type = 2; //DATA_EP_TYPE_HSUSB
1125*4882a593Smuzhiyun 	rmnet_info->iface_id = 4;
1126*4882a593Smuzhiyun 	rmnet_info->qmap_mode = pQmapDev->qmap_mode;
1127*4882a593Smuzhiyun 	rmnet_info->qmap_version = pQmapDev->qmap_version;
1128*4882a593Smuzhiyun 	rmnet_info->dl_minimum_padding = 0;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	for (i = 0; i < pQmapDev->qmap_mode; i++) {
1131*4882a593Smuzhiyun 		struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 		if (!qmap_net)
1134*4882a593Smuzhiyun 			break;
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 		strcpy(rmnet_info->ifname[i], qmap_net->name);
1137*4882a593Smuzhiyun 		rmnet_info->mux_id[i] = QUECTEL_QMAP_MUX_ID;
1138*4882a593Smuzhiyun 		if (pQmapDev->qmap_mode > 1) {
1139*4882a593Smuzhiyun 			struct qmap_priv *priv = netdev_priv(qmap_net);
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 			rmnet_info->mux_id[i] = priv->mux_id;
1142*4882a593Smuzhiyun 		}
1143*4882a593Smuzhiyun 	}
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun static int qmap_ndo_do_ioctl(struct net_device *dev,struct ifreq *ifr, int cmd) {
1147*4882a593Smuzhiyun 	int rc = -EOPNOTSUPP;
1148*4882a593Smuzhiyun 	uint link_state = 0;
1149*4882a593Smuzhiyun 	sGobiUSBNet *pQmapDev = net_to_qmap(dev);
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	atomic_inc(&pQmapDev->refcount);
1152*4882a593Smuzhiyun 	if (!pQmapDev->mbQMIReady) {
1153*4882a593Smuzhiyun 		if (wait_for_completion_interruptible_timeout(&pQmapDev->mQMIReadyCompletion, 15*HZ) <= 0) {
1154*4882a593Smuzhiyun 			if (atomic_dec_and_test(&pQmapDev->refcount)) {
1155*4882a593Smuzhiyun 				kfree( pQmapDev );
1156*4882a593Smuzhiyun 			}
1157*4882a593Smuzhiyun 			return -ETIMEDOUT;
1158*4882a593Smuzhiyun 		}
1159*4882a593Smuzhiyun 	}
1160*4882a593Smuzhiyun 	atomic_dec(&pQmapDev->refcount);
1161*4882a593Smuzhiyun 
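	/*
	 * Private ioctls in the SIOCDEVPRIVATE range, summarized from the
	 * handlers below (a sketch, not an official interface description):
	 *   0x89F1 - set link state; the u32 argument uses the same encoding
	 *            as the "link_state" sysfs attribute
	 *   0x89F2 - no-op, apparently used by userspace to probe the driver
	 *   0x89F3 - copy an RMNET_INFO snapshot (qmap mode/version, interface
	 *            names, mux ids) back to userspace
	 */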
1162*4882a593Smuzhiyun 	switch (cmd) {
1163*4882a593Smuzhiyun 	case 0x89F1: //SIOCDEVPRIVATE
1164*4882a593Smuzhiyun 		rc = copy_from_user(&link_state, ifr->ifr_ifru.ifru_data, sizeof(link_state));
		rc = rc ? -EFAULT : 0; /* copy_from_user() returns the uncopied byte count, not an errno */
1165*4882a593Smuzhiyun 		if (!rc) {
1166*4882a593Smuzhiyun 			char buf[32];
1167*4882a593Smuzhiyun 			snprintf(buf, sizeof(buf), "%u", link_state);
1168*4882a593Smuzhiyun 			link_state_store(&dev->dev, NULL, buf, strlen(buf));
1169*4882a593Smuzhiyun 		}
1170*4882a593Smuzhiyun 	break;
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	case 0x89F2: //SIOCDEVPRIVATE
1173*4882a593Smuzhiyun 		rc = 0;
1174*4882a593Smuzhiyun 	break;
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	case 0x89F3: //SIOCDEVPRIVATE
1177*4882a593Smuzhiyun 		if (pQmapDev->qmap_mode) {
1178*4882a593Smuzhiyun 			RMNET_INFO rmnet_info;
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 			rmnet_info_set(pQmapDev, &rmnet_info);
1181*4882a593Smuzhiyun 			rc = copy_to_user(ifr->ifr_ifru.ifru_data, &rmnet_info, sizeof(rmnet_info));
			rc = rc ? -EFAULT : 0; /* map the uncopied byte count to an errno */
1182*4882a593Smuzhiyun 		}
1183*4882a593Smuzhiyun 	break;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	default:
1186*4882a593Smuzhiyun 	break;
1187*4882a593Smuzhiyun 	}
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	return rc;
1190*4882a593Smuzhiyun }
1191*4882a593Smuzhiyun #endif
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun #ifdef CONFIG_PM
1194*4882a593Smuzhiyun /*===========================================================================
1195*4882a593Smuzhiyun METHOD:
1196*4882a593Smuzhiyun    GobiNetSuspend (Public Method)
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun DESCRIPTION:
1199*4882a593Smuzhiyun    Stops QMI traffic while device is suspended
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun PARAMETERS
1202*4882a593Smuzhiyun    pIntf          [ I ] - Pointer to interface
1203*4882a593Smuzhiyun    powerEvent     [ I ] - Power management event
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun RETURN VALUE:
1206*4882a593Smuzhiyun    int - 0 for success
1207*4882a593Smuzhiyun          negative errno for failure
1208*4882a593Smuzhiyun ===========================================================================*/
1209*4882a593Smuzhiyun static int GobiNetSuspend(
1210*4882a593Smuzhiyun    struct usb_interface *     pIntf,
1211*4882a593Smuzhiyun    pm_message_t               powerEvent )
1212*4882a593Smuzhiyun {
1213*4882a593Smuzhiyun    struct usbnet * pDev;
1214*4882a593Smuzhiyun    sGobiUSBNet * pGobiDev;
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun    if (pIntf == 0)
1217*4882a593Smuzhiyun    {
1218*4882a593Smuzhiyun       return -ENOMEM;
1219*4882a593Smuzhiyun    }
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
1222*4882a593Smuzhiyun    pDev = usb_get_intfdata( pIntf );
1223*4882a593Smuzhiyun #else
1224*4882a593Smuzhiyun    pDev = (struct usbnet *)pIntf->dev.platform_data;
1225*4882a593Smuzhiyun #endif
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun    if (pDev == NULL || pDev->net == NULL)
1228*4882a593Smuzhiyun    {
1229*4882a593Smuzhiyun       DBG( "failed to get netdevice\n" );
1230*4882a593Smuzhiyun       return -ENXIO;
1231*4882a593Smuzhiyun    }
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun    pGobiDev = (sGobiUSBNet *)pDev->data[0];
1234*4882a593Smuzhiyun    if (pGobiDev == NULL)
1235*4882a593Smuzhiyun    {
1236*4882a593Smuzhiyun       DBG( "failed to get QMIDevice\n" );
1237*4882a593Smuzhiyun       return -ENXIO;
1238*4882a593Smuzhiyun    }
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun    if (pGobiDev->mbQMISyncIng)
1241*4882a593Smuzhiyun    {
1242*4882a593Smuzhiyun       DBG( "QMI sync ing\n" );
1243*4882a593Smuzhiyun       return -EBUSY;
1244*4882a593Smuzhiyun    }
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun    // Is this autosuspend or system suspend?
1247*4882a593Smuzhiyun    //    do we allow remote wakeup?
1248*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
1249*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
1250*4882a593Smuzhiyun    if (pDev->udev->auto_pm == 0)
1251*4882a593Smuzhiyun #else
1252*4882a593Smuzhiyun    if (1)
1253*4882a593Smuzhiyun #endif
1254*4882a593Smuzhiyun #else
1255*4882a593Smuzhiyun    if ((powerEvent.event & PM_EVENT_AUTO) == 0)
1256*4882a593Smuzhiyun #endif
1257*4882a593Smuzhiyun    {
1258*4882a593Smuzhiyun       DBG( "device suspended to power level %d\n",
1259*4882a593Smuzhiyun            powerEvent.event );
1260*4882a593Smuzhiyun       GobiSetDownReason( pGobiDev, DRIVER_SUSPENDED );
1261*4882a593Smuzhiyun    }
1262*4882a593Smuzhiyun    else
1263*4882a593Smuzhiyun    {
1264*4882a593Smuzhiyun       DBG( "device autosuspend\n" );
1265*4882a593Smuzhiyun    }
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun    if (powerEvent.event & PM_EVENT_SUSPEND)
1268*4882a593Smuzhiyun    {
1269*4882a593Smuzhiyun       // Stop QMI read callbacks
1270*4882a593Smuzhiyun       if (pGobiDev->m_qcrmcall_mode) {
1271*4882a593Smuzhiyun       } else {
1272*4882a593Smuzhiyun          KillRead( pGobiDev );
1273*4882a593Smuzhiyun       }
1274*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 ))
1275*4882a593Smuzhiyun       pDev->udev->reset_resume = 0;
1276*4882a593Smuzhiyun #endif
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun       // Store power state to avoid duplicate resumes
1279*4882a593Smuzhiyun       pIntf->dev.power.power_state.event = powerEvent.event;
1280*4882a593Smuzhiyun    }
1281*4882a593Smuzhiyun    else
1282*4882a593Smuzhiyun    {
1283*4882a593Smuzhiyun       // Other power modes cause QMI connection to be lost
1284*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 ))
1285*4882a593Smuzhiyun       pDev->udev->reset_resume = 1;
1286*4882a593Smuzhiyun #endif
1287*4882a593Smuzhiyun    }
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun    // Run usbnet's suspend function
1290*4882a593Smuzhiyun    return usbnet_suspend( pIntf, powerEvent );
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun int QuecGobiNetSuspend(struct usb_interface *pIntf, pm_message_t powerEvent ) {
1293*4882a593Smuzhiyun 	return GobiNetSuspend(pIntf, powerEvent);
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun /*===========================================================================
1297*4882a593Smuzhiyun METHOD:
1298*4882a593Smuzhiyun    GobiNetResume (Public Method)
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun DESCRIPTION:
1301*4882a593Smuzhiyun    Resume QMI traffic or recreate QMI device
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun PARAMETERS
1304*4882a593Smuzhiyun    pIntf          [ I ] - Pointer to interface
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun RETURN VALUE:
1307*4882a593Smuzhiyun    int - 0 for success
1308*4882a593Smuzhiyun          negative errno for failure
1309*4882a593Smuzhiyun ===========================================================================*/
1310*4882a593Smuzhiyun static int GobiNetResume( struct usb_interface * pIntf )
1311*4882a593Smuzhiyun {
1312*4882a593Smuzhiyun    struct usbnet * pDev;
1313*4882a593Smuzhiyun    sGobiUSBNet * pGobiDev;
1314*4882a593Smuzhiyun    int nRet;
1315*4882a593Smuzhiyun    int oldPowerState;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun    if (pIntf == 0)
1318*4882a593Smuzhiyun    {
1319*4882a593Smuzhiyun       return -ENOMEM;
1320*4882a593Smuzhiyun    }
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
1323*4882a593Smuzhiyun    pDev = usb_get_intfdata( pIntf );
1324*4882a593Smuzhiyun #else
1325*4882a593Smuzhiyun    pDev = (struct usbnet *)pIntf->dev.platform_data;
1326*4882a593Smuzhiyun #endif
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun    if (pDev == NULL || pDev->net == NULL)
1329*4882a593Smuzhiyun    {
1330*4882a593Smuzhiyun       DBG( "failed to get netdevice\n" );
1331*4882a593Smuzhiyun       return -ENXIO;
1332*4882a593Smuzhiyun    }
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun    pGobiDev = (sGobiUSBNet *)pDev->data[0];
1335*4882a593Smuzhiyun    if (pGobiDev == NULL)
1336*4882a593Smuzhiyun    {
1337*4882a593Smuzhiyun       DBG( "failed to get QMIDevice\n" );
1338*4882a593Smuzhiyun       return -ENXIO;
1339*4882a593Smuzhiyun    }
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun    oldPowerState = pIntf->dev.power.power_state.event;
1342*4882a593Smuzhiyun    pIntf->dev.power.power_state.event = PM_EVENT_ON;
1343*4882a593Smuzhiyun    DBG( "resuming from power mode %d\n", oldPowerState );
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun    if (oldPowerState & PM_EVENT_SUSPEND)
1346*4882a593Smuzhiyun    {
1347*4882a593Smuzhiyun       // It doesn't matter if this is autoresume or system resume
1348*4882a593Smuzhiyun       GobiClearDownReason( pGobiDev, DRIVER_SUSPENDED );
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun       nRet = usbnet_resume( pIntf );
1351*4882a593Smuzhiyun       if (nRet != 0)
1352*4882a593Smuzhiyun       {
1353*4882a593Smuzhiyun          DBG( "usbnet_resume error %d\n", nRet );
1354*4882a593Smuzhiyun          return nRet;
1355*4882a593Smuzhiyun       }
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun       // Restart QMI read callbacks
1358*4882a593Smuzhiyun       if (pGobiDev->m_qcrmcall_mode) {
1359*4882a593Smuzhiyun          nRet = 0;
1360*4882a593Smuzhiyun       } else {
1361*4882a593Smuzhiyun          nRet = StartRead( pGobiDev );
1362*4882a593Smuzhiyun       }
1363*4882a593Smuzhiyun       if (nRet != 0)
1364*4882a593Smuzhiyun       {
1365*4882a593Smuzhiyun          DBG( "StartRead error %d\n", nRet );
1366*4882a593Smuzhiyun          return nRet;
1367*4882a593Smuzhiyun       }
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun #ifdef CONFIG_PM
1370*4882a593Smuzhiyun    #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
1371*4882a593Smuzhiyun       // Kick Auto PM thread to process any queued URBs
1372*4882a593Smuzhiyun       complete( &pGobiDev->mAutoPM.mThreadDoWork );
1373*4882a593Smuzhiyun     #endif
1374*4882a593Smuzhiyun #endif /* CONFIG_PM */
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP)
1377*4882a593Smuzhiyun       if ((!netif_queue_stopped(pDev->net)) && (pGobiDev->qmap_mode > 1)) {
1378*4882a593Smuzhiyun             rmnet_usb_tx_wake_queue((unsigned long )pGobiDev);
1379*4882a593Smuzhiyun       }
1380*4882a593Smuzhiyun #endif
1381*4882a593Smuzhiyun    }
1382*4882a593Smuzhiyun    else
1383*4882a593Smuzhiyun    {
1384*4882a593Smuzhiyun       DBG( "nothing to resume\n" );
1385*4882a593Smuzhiyun       return 0;
1386*4882a593Smuzhiyun    }
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun    return nRet;
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,27 ))
1391*4882a593Smuzhiyun static int GobiNetResetResume( struct usb_interface * pIntf )
1392*4882a593Smuzhiyun {
1393*4882a593Smuzhiyun    INFO( "device does not support reset_resume\n" );
1394*4882a593Smuzhiyun    pIntf->needs_binding = 1;
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun    return -EOPNOTSUPP;
1397*4882a593Smuzhiyun }
1398*4882a593Smuzhiyun #endif
1399*4882a593Smuzhiyun #endif /* CONFIG_PM */
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun static void ql_net_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
1402*4882a593Smuzhiyun {
1403*4882a593Smuzhiyun 	usbnet_get_drvinfo(net, info);
1404*4882a593Smuzhiyun 	/* Inherit standard device info */
1405*4882a593Smuzhiyun 	strlcpy(info->driver, driver_name, sizeof(info->driver));
1406*4882a593Smuzhiyun 	strlcpy(info->version, VERSION_NUMBER, sizeof(info->version));
1407*4882a593Smuzhiyun }
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun static struct ethtool_ops ql_net_ethtool_ops;
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun /*===========================================================================
1412*4882a593Smuzhiyun METHOD:
1413*4882a593Smuzhiyun    GobiNetDriverBind (Public Method)
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun DESCRIPTION:
1416*4882a593Smuzhiyun    Setup in and out pipes
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun PARAMETERS
1419*4882a593Smuzhiyun    pDev           [ I ] - Pointer to usbnet device
1420*4882a593Smuzhiyun    pIntf          [ I ] - Pointer to interface
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun RETURN VALUE:
1423*4882a593Smuzhiyun    int - 0 for success
1424*4882a593Smuzhiyun          Negative errno for error
1425*4882a593Smuzhiyun ===========================================================================*/
1426*4882a593Smuzhiyun static int GobiNetDriverBind(
1427*4882a593Smuzhiyun    struct usbnet *         pDev,
1428*4882a593Smuzhiyun    struct usb_interface *  pIntf )
1429*4882a593Smuzhiyun {
1430*4882a593Smuzhiyun    int numEndpoints;
1431*4882a593Smuzhiyun    int endpointIndex;
1432*4882a593Smuzhiyun    struct usb_host_endpoint * pEndpoint = NULL;
1433*4882a593Smuzhiyun    struct usb_host_endpoint * pIn = NULL;
1434*4882a593Smuzhiyun    struct usb_host_endpoint * pOut = NULL;
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun    // Verify one altsetting
1437*4882a593Smuzhiyun    if (pIntf->num_altsetting != 1)
1438*4882a593Smuzhiyun    {
1439*4882a593Smuzhiyun       DBG( "invalid num_altsetting %u\n", pIntf->num_altsetting );
1440*4882a593Smuzhiyun       return -ENODEV;
1441*4882a593Smuzhiyun    }
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun    // Verify correct interface (4 for UC20)
1444*4882a593Smuzhiyun    if ( !test_bit(pIntf->cur_altsetting->desc.bInterfaceNumber, &pDev->driver_info->data))
1445*4882a593Smuzhiyun    {
1446*4882a593Smuzhiyun       DBG( "invalid interface %d\n",
1447*4882a593Smuzhiyun            pIntf->cur_altsetting->desc.bInterfaceNumber );
1448*4882a593Smuzhiyun       return -ENODEV;
1449*4882a593Smuzhiyun    }
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun    if ( pIntf->cur_altsetting->desc.bInterfaceClass != 0xff)
1452*4882a593Smuzhiyun    {
1453*4882a593Smuzhiyun       struct usb_interface_descriptor *desc = &pIntf->cur_altsetting->desc;
1454*4882a593Smuzhiyun       const char *qcfg_usbnet = "UNKNOWN";
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun       if (desc->bInterfaceClass == 2 && desc->bInterfaceSubClass == 0x0e) {
1457*4882a593Smuzhiyun          qcfg_usbnet = "MBIM";
1458*4882a593Smuzhiyun       } else if (desc->bInterfaceClass == 2 && desc->bInterfaceSubClass == 0x06) {
1459*4882a593Smuzhiyun          qcfg_usbnet = "ECM";
1460*4882a593Smuzhiyun       } else if (desc->bInterfaceClass == 0xe0 && desc->bInterfaceSubClass == 1 && desc->bInterfaceProtocol == 3) {
1461*4882a593Smuzhiyun          qcfg_usbnet = "RNDIS";
1462*4882a593Smuzhiyun       }
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun       INFO( "usbnet is %s, not NDIS/RMNET!\n", qcfg_usbnet);
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun       return -ENODEV;
1467*4882a593Smuzhiyun    }
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun    // Collect In and Out endpoints
1470*4882a593Smuzhiyun    numEndpoints = pIntf->cur_altsetting->desc.bNumEndpoints;
1471*4882a593Smuzhiyun    for (endpointIndex = 0; endpointIndex < numEndpoints; endpointIndex++)
1472*4882a593Smuzhiyun    {
1473*4882a593Smuzhiyun       pEndpoint = pIntf->cur_altsetting->endpoint + endpointIndex;
1474*4882a593Smuzhiyun       if (pEndpoint == NULL)
1475*4882a593Smuzhiyun       {
1476*4882a593Smuzhiyun          DBG( "invalid endpoint %u\n", endpointIndex );
1477*4882a593Smuzhiyun          return -ENODEV;
1478*4882a593Smuzhiyun       }
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun       if (usb_endpoint_dir_in( &pEndpoint->desc ) == true
1481*4882a593Smuzhiyun       &&  usb_endpoint_xfer_int( &pEndpoint->desc ) == false)
1482*4882a593Smuzhiyun       {
1483*4882a593Smuzhiyun          pIn = pEndpoint;
1484*4882a593Smuzhiyun       }
1485*4882a593Smuzhiyun       else if (usb_endpoint_dir_out( &pEndpoint->desc ) == true)
1486*4882a593Smuzhiyun       {
1487*4882a593Smuzhiyun          pOut = pEndpoint;
1488*4882a593Smuzhiyun       }
1489*4882a593Smuzhiyun    }
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun    if (pIn == NULL || pOut == NULL)
1492*4882a593Smuzhiyun    {
1493*4882a593Smuzhiyun       DBG( "invalid endpoints\n" );
1494*4882a593Smuzhiyun       return -ENODEV;
1495*4882a593Smuzhiyun    }
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun    if (usb_set_interface( pDev->udev,
1498*4882a593Smuzhiyun                           pIntf->cur_altsetting->desc.bInterfaceNumber,
1499*4882a593Smuzhiyun                           0 ) != 0)
1500*4882a593Smuzhiyun    {
1501*4882a593Smuzhiyun       DBG( "unable to set interface\n" );
1502*4882a593Smuzhiyun       return -ENODEV;
1503*4882a593Smuzhiyun    }
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun    pDev->in = usb_rcvbulkpipe( pDev->udev,
1506*4882a593Smuzhiyun                    pIn->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK );
1507*4882a593Smuzhiyun    pDev->out = usb_sndbulkpipe( pDev->udev,
1508*4882a593Smuzhiyun                    pOut->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK );
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_MULTI_PACKAGES)
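    /*
     * Vendor-specific rx aggregation (a sketch of what the control transfer
     * below appears to configure): ask Quectel modules (VID 0x2C7C) to pack
     * up to rx_packets IP datagrams into one bulk-in URB, each prefixed by a
     * quec_net_package_header; rx_urb_size is enlarged accordingly when the
     * module accepts the request.
     */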
1511*4882a593Smuzhiyun     if (rx_packets && pDev->udev->descriptor.idVendor == cpu_to_le16(0x2C7C)) {
1512*4882a593Smuzhiyun         struct multi_package_config rx_config = {
1513*4882a593Smuzhiyun             .enable = cpu_to_le32(1),
1514*4882a593Smuzhiyun             .package_max_len = cpu_to_le32((1500 + sizeof(struct quec_net_package_header)) * rx_packets),
1515*4882a593Smuzhiyun             .package_max_count_in_queue = cpu_to_le32(rx_packets),
1516*4882a593Smuzhiyun             .timeout = cpu_to_le32(10*1000), //10ms
1517*4882a593Smuzhiyun         };
1518*4882a593Smuzhiyun         int ret = 0;
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun     	ret = usb_control_msg(
1521*4882a593Smuzhiyun     		interface_to_usbdev(pIntf),
1522*4882a593Smuzhiyun     		usb_sndctrlpipe(interface_to_usbdev(pIntf), 0),
1523*4882a593Smuzhiyun     		USB_CDC_SET_MULTI_PACKAGE_COMMAND,
1524*4882a593Smuzhiyun     		0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
1525*4882a593Smuzhiyun     		1,
1526*4882a593Smuzhiyun     		pIntf->cur_altsetting->desc.bInterfaceNumber,
1527*4882a593Smuzhiyun     		&rx_config, sizeof(rx_config), 100);
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun         DBG( "Quectel EC21&EC25 rx_packets=%d, ret=%d\n", rx_packets, ret);
1530*4882a593Smuzhiyun         if (ret == sizeof(rx_config)) {
1531*4882a593Smuzhiyun            pDev->rx_urb_size = le32_to_cpu(rx_config.package_max_len);
1532*4882a593Smuzhiyun         } else {
1533*4882a593Smuzhiyun             rx_packets = 0;
1534*4882a593Smuzhiyun         }
1535*4882a593Smuzhiyun     }
1536*4882a593Smuzhiyun #endif
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun #if 1 //def DATA_MODE_RP
1539*4882a593Smuzhiyun     /* make MAC addr easily distinguishable from an IP header */
1540*4882a593Smuzhiyun     if ((pDev->net->dev_addr[0] & 0xd0) == 0x40) {
1541*4882a593Smuzhiyun         /* clearing this bit would make the usbnet adapter be named usbX (instead of ethX) */
1542*4882a593Smuzhiyun         pDev->net->dev_addr[0] |= 0x02;	/* set local assignment bit */
1543*4882a593Smuzhiyun         pDev->net->dev_addr[0] &= 0xbf;	/* clear "IP" bit */
1544*4882a593Smuzhiyun     }
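    /* Note: the memcpy() below overwrites dev_addr with the fixed node_id,
       so it also replaces the byte adjusted above; the adjustment is kept as
       in the original vendor code. */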
1545*4882a593Smuzhiyun     memcpy (pDev->net->dev_addr, node_id, sizeof node_id);
1546*4882a593Smuzhiyun     pDev->net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
1547*4882a593Smuzhiyun     pDev->net->features |= (NETIF_F_VLAN_CHALLENGED);
1548*4882a593Smuzhiyun #endif
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	ql_net_ethtool_ops = *pDev->net->ethtool_ops;
1551*4882a593Smuzhiyun 	ql_net_ethtool_ops.get_drvinfo = ql_net_get_drvinfo;
1552*4882a593Smuzhiyun 	pDev->net->ethtool_ops = &ql_net_ethtool_ops;
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun    DBG( "in %x, out %x\n",
1555*4882a593Smuzhiyun         pIn->desc.bEndpointAddress,
1556*4882a593Smuzhiyun         pOut->desc.bEndpointAddress );
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun    // In later versions of the kernel, usbnet helps with this
1559*4882a593Smuzhiyun #if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,23 ))
1560*4882a593Smuzhiyun    pIntf->dev.platform_data = (void *)pDev;
1561*4882a593Smuzhiyun #endif
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun     if (qcrmcall_mode == 0 && pDev->net->sysfs_groups[0] == NULL && gobinet_sysfs_attr_group.attrs[0] != NULL) {
1564*4882a593Smuzhiyun #if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,32)) //see commit 0c509a6c9393b27a8c5a01acd4a72616206cfc24
1565*4882a593Smuzhiyun         pDev->net->sysfs_groups[1] = &gobinet_sysfs_attr_group; //see netdev_register_sysfs()
1566*4882a593Smuzhiyun #else
1567*4882a593Smuzhiyun         pDev->net->sysfs_groups[0] = &gobinet_sysfs_attr_group;
1568*4882a593Smuzhiyun #endif
1569*4882a593Smuzhiyun     }
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun     if (!pDev->rx_urb_size) {
1572*4882a593Smuzhiyun //To avoid the module reporting MTU 1460 while still receiving 1500-byte IP packets (which can crash the host system),
1573*4882a593Smuzhiyun //the setting below keeps usbnet.c:usbnet_change_mtu() from shrinking rx_urb_size to match the MTU.
1574*4882a593Smuzhiyun         pDev->rx_urb_size = ETH_DATA_LEN + ETH_HLEN + 6;
1575*4882a593Smuzhiyun     }
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun    return 0;
1578*4882a593Smuzhiyun }
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun /*===========================================================================
1581*4882a593Smuzhiyun METHOD:
1582*4882a593Smuzhiyun    GobiNetDriverUnbind (Public Method)
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun DESCRIPTION:
1585*4882a593Smuzhiyun    Deregisters QMI device (Registration happened in the probe function)
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun PARAMETERS
1588*4882a593Smuzhiyun    pDev           [ I ] - Pointer to usbnet device
1589*4882a593Smuzhiyun    pIntfUnused    [ I ] - Pointer to interface
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun RETURN VALUE:
1592*4882a593Smuzhiyun    None
1593*4882a593Smuzhiyun ===========================================================================*/
1594*4882a593Smuzhiyun static void GobiNetDriverUnbind(
1595*4882a593Smuzhiyun    struct usbnet *         pDev,
1596*4882a593Smuzhiyun    struct usb_interface *  pIntf)
1597*4882a593Smuzhiyun {
1598*4882a593Smuzhiyun    sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun    // Should already be down, but just in case...
1601*4882a593Smuzhiyun    netif_carrier_off( pDev->net );
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun    if (pGobiDev->m_qcrmcall_mode) {
1604*4882a593Smuzhiyun    } else {
1605*4882a593Smuzhiyun       DeregisterQMIDevice( pGobiDev );
1606*4882a593Smuzhiyun    }
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun #if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,29 ))
1609*4882a593Smuzhiyun    kfree( pDev->net->netdev_ops );
1610*4882a593Smuzhiyun    pDev->net->netdev_ops = NULL;
1611*4882a593Smuzhiyun #endif
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun #if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,23 ))
1614*4882a593Smuzhiyun    pIntf->dev.platform_data = NULL;
1615*4882a593Smuzhiyun #endif
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun #if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,19 ))
1618*4882a593Smuzhiyun    pIntf->needs_remote_wakeup = 0;
1619*4882a593Smuzhiyun #endif
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun    if (atomic_dec_and_test(&pGobiDev->refcount))
1622*4882a593Smuzhiyun       kfree( pGobiDev );
1623*4882a593Smuzhiyun    else
1624*4882a593Smuzhiyun       INFO("memory leak!\n");
1625*4882a593Smuzhiyun }
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun #if 1 //def DATA_MODE_RP
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP)
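/*
 * De-aggregate one QMAP (rmnet MAP) URB: each inner frame starts with a
 * struct rmnet_map_header (optionally followed by a MAPv5 checksum header)
 * carrying a mux id and a padded payload length.  Every valid IP packet is
 * copied into a fresh skb and delivered either to the matching
 * mpQmapNetDev[] virtual interface or back through usbnet_skb_return().
 */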
1630*4882a593Smuzhiyun static void _rmnet_usb_rx_handler(struct usbnet *dev, struct sk_buff *skb_in)
1631*4882a593Smuzhiyun {
1632*4882a593Smuzhiyun 	sGobiUSBNet * pQmapDev = (sGobiUSBNet *)dev->data[0];
1633*4882a593Smuzhiyun 	struct sk_buff *qmap_skb;
1634*4882a593Smuzhiyun 	struct sk_buff_head skb_chain;
1635*4882a593Smuzhiyun 	uint dl_minimum_padding = 0;
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun #if defined(QUECTEL_UL_DATA_AGG)
1638*4882a593Smuzhiyun 	if (pQmapDev->qmap_version == 9)
1639*4882a593Smuzhiyun 		dl_minimum_padding = pQmapDev->agg_ctx.dl_minimum_padding;
1640*4882a593Smuzhiyun #endif
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 	__skb_queue_head_init(&skb_chain);
1643*4882a593Smuzhiyun 
1644*4882a593Smuzhiyun 	while (skb_in->len > sizeof(struct qmap_hdr)) {
1645*4882a593Smuzhiyun 		struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb_in->data;
1646*4882a593Smuzhiyun 		struct rmnet_map_v5_csum_header *ul_header = NULL;
1647*4882a593Smuzhiyun 		size_t hdr_size = sizeof(struct rmnet_map_header);
1648*4882a593Smuzhiyun 		struct net_device *qmap_net;
1649*4882a593Smuzhiyun 		int pkt_len = ntohs(map_header->pkt_len);
1650*4882a593Smuzhiyun 		int skb_len;
1651*4882a593Smuzhiyun 		__be16 protocol;
1652*4882a593Smuzhiyun 		int mux_id;
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun 		if (map_header->next_hdr) {
1655*4882a593Smuzhiyun 			ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
1656*4882a593Smuzhiyun 			hdr_size += sizeof(struct rmnet_map_v5_csum_header);
1657*4882a593Smuzhiyun 		}
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 		skb_len = pkt_len - (map_header->pad_len&0x3F);
1660*4882a593Smuzhiyun 		skb_len -= dl_minimum_padding;
1661*4882a593Smuzhiyun 		if (skb_len > 1500) {
1662*4882a593Smuzhiyun 			dev_info(&dev->net->dev, "drop skb_len=%x larger than 1500\n", skb_len);
1663*4882a593Smuzhiyun 			goto error_pkt;
1664*4882a593Smuzhiyun 		}
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun 		if (skb_in->len < (pkt_len + hdr_size)) {
1667*4882a593Smuzhiyun 			dev_info(&dev->net->dev, "drop qmap unknown pkt, len=%d, pkt_len=%d\n", skb_in->len, pkt_len);
1668*4882a593Smuzhiyun 			goto error_pkt;
1669*4882a593Smuzhiyun 		}
1670*4882a593Smuzhiyun 
1671*4882a593Smuzhiyun 		if (map_header->cd_bit) {
1672*4882a593Smuzhiyun 			dev_info(&dev->net->dev, "skip qmap command packet\n");
1673*4882a593Smuzhiyun 			goto skip_pkt;
1674*4882a593Smuzhiyun 		}
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 		switch (skb_in->data[hdr_size] & 0xf0) {
1677*4882a593Smuzhiyun 			case 0x40:
1678*4882a593Smuzhiyun 				protocol = htons(ETH_P_IP);
1679*4882a593Smuzhiyun 			break;
1680*4882a593Smuzhiyun 			case 0x60:
1681*4882a593Smuzhiyun 				protocol = htons(ETH_P_IPV6);
1682*4882a593Smuzhiyun 			break;
1683*4882a593Smuzhiyun 			default:
1684*4882a593Smuzhiyun 				dev_info(&dev->net->dev, "unknown skb->protocol %02x\n", skb_in->data[hdr_size]);
1685*4882a593Smuzhiyun 				goto error_pkt;
1686*4882a593Smuzhiyun 		}
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 		mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
1689*4882a593Smuzhiyun 		if (mux_id >= pQmapDev->qmap_mode) {
1690*4882a593Smuzhiyun 			dev_info(&dev->net->dev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
1691*4882a593Smuzhiyun 			goto error_pkt;
1692*4882a593Smuzhiyun 		}
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun 		qmap_net = pQmapDev->mpQmapNetDev[mux_id];
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 		if (qmap_net == NULL) {
1697*4882a593Smuzhiyun 			dev_info(&dev->net->dev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
1698*4882a593Smuzhiyun 			goto skip_pkt;
1699*4882a593Smuzhiyun 		}
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun 		qmap_skb = netdev_alloc_skb(qmap_net, skb_len);
1702*4882a593Smuzhiyun 		if (qmap_skb) {
1703*4882a593Smuzhiyun 			skb_put(qmap_skb, skb_len);
1704*4882a593Smuzhiyun 			memcpy(qmap_skb->data, skb_in->data + hdr_size, skb_len);
1705*4882a593Smuzhiyun 		}
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 		if (qmap_skb == NULL) {
1708*4882a593Smuzhiyun 			dev_info(&dev->net->dev, "fail to alloc skb, pkt_len = %d\n", skb_len);
1709*4882a593Smuzhiyun 			goto error_pkt;
1710*4882a593Smuzhiyun 		}
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 		skb_reset_transport_header(qmap_skb);
1713*4882a593Smuzhiyun 		skb_reset_network_header(qmap_skb);
1714*4882a593Smuzhiyun 		qmap_skb->pkt_type = PACKET_HOST;
1715*4882a593Smuzhiyun 		skb_set_mac_header(qmap_skb, 0);
1716*4882a593Smuzhiyun 		qmap_skb->protocol = protocol;
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 		if (ul_header && ul_header->header_type == RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD
1719*4882a593Smuzhiyun 			&& ul_header->csum_valid_required) {
1720*4882a593Smuzhiyun #if 0 //TODO
1721*4882a593Smuzhiyun 			qmap_skb->ip_summed = CHECKSUM_UNNECESSARY;
1722*4882a593Smuzhiyun #endif
1723*4882a593Smuzhiyun 		}
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 		if (qmap_skb->dev->type == ARPHRD_ETHER) {
1726*4882a593Smuzhiyun 			skb_push(qmap_skb, ETH_HLEN);
1727*4882a593Smuzhiyun 			skb_reset_mac_header(qmap_skb);
1728*4882a593Smuzhiyun 			memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN);
1729*4882a593Smuzhiyun 			memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN);
1730*4882a593Smuzhiyun 			eth_hdr(qmap_skb)->h_proto = protocol;
1731*4882a593Smuzhiyun #ifdef QUECTEL_BRIDGE_MODE
1732*4882a593Smuzhiyun 			bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb);
1733*4882a593Smuzhiyun #endif
1734*4882a593Smuzhiyun 		}
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 		__skb_queue_tail(&skb_chain, qmap_skb);
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun skip_pkt:
1739*4882a593Smuzhiyun 		skb_pull(skb_in, pkt_len + hdr_size);
1740*4882a593Smuzhiyun 	}
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun error_pkt:
1743*4882a593Smuzhiyun 	while ((qmap_skb = __skb_dequeue (&skb_chain))) {
1744*4882a593Smuzhiyun 		if (qmap_skb->dev != dev->net) {
1745*4882a593Smuzhiyun 			if (qmap_skb->dev->type == ARPHRD_ETHER)
1746*4882a593Smuzhiyun 				__skb_pull(qmap_skb, ETH_HLEN);
1747*4882a593Smuzhiyun 			rmnet_vnd_update_rx_stats(qmap_skb->dev, 1, qmap_skb->len);
1748*4882a593Smuzhiyun 			netif_rx(qmap_skb);
1749*4882a593Smuzhiyun 		}
1750*4882a593Smuzhiyun 		else {
1751*4882a593Smuzhiyun 			qmap_skb->protocol = 0;
1752*4882a593Smuzhiyun 			usbnet_skb_return(dev, qmap_skb);
1753*4882a593Smuzhiyun 		}
1754*4882a593Smuzhiyun 	}
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun 
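/*
 * netdev rx_handler glue for kernels that support rx handlers: aggregated MAP
 * frames are tagged ETH_P_MAP by GobiNetDriverRxQmapFixup() below and end up
 * here, where they are de-aggregated by _rmnet_usb_rx_handler().  The two
 * variants only differ in the rx_handler prototype used by the respective
 * kernel versions (see the commit ids referenced in the #if conditions).
 */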
1757*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,35 )) //ab95bfe01f9872459c8678572ccadbf646badad0
1758*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,39 )) //8a4eb5734e8d1dc60a8c28576bbbdfdcc643626d
1759*4882a593Smuzhiyun static struct sk_buff* rmnet_usb_rx_handler(struct sk_buff *skb)
1760*4882a593Smuzhiyun {
1761*4882a593Smuzhiyun 	struct usbnet *dev;
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	if (!skb)
1764*4882a593Smuzhiyun 		goto done;
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 	//printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len);
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	if (skb->pkt_type == PACKET_LOOPBACK)
1769*4882a593Smuzhiyun 		return skb;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 	if (skb->protocol != htons(ETH_P_MAP)) {
1772*4882a593Smuzhiyun 		WARN_ON(1);
1773*4882a593Smuzhiyun 		return skb;
1774*4882a593Smuzhiyun 	}
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 	dev = netdev_priv(skb->dev);
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 	if (dev == NULL) {
1779*4882a593Smuzhiyun 		WARN_ON(1);
1780*4882a593Smuzhiyun 		return skb;
1781*4882a593Smuzhiyun 	}
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	_rmnet_usb_rx_handler(dev, skb);
1784*4882a593Smuzhiyun 	consume_skb(skb);
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun done:
1787*4882a593Smuzhiyun 	return NULL;
1788*4882a593Smuzhiyun }
1789*4882a593Smuzhiyun #else
1790*4882a593Smuzhiyun static rx_handler_result_t rmnet_usb_rx_handler(struct sk_buff **pskb)
1791*4882a593Smuzhiyun {
1792*4882a593Smuzhiyun 	struct sk_buff *skb = *pskb;
1793*4882a593Smuzhiyun 	struct usbnet *dev;
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	if (!skb)
1796*4882a593Smuzhiyun 		goto done;
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	//printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len);
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 	if (skb->pkt_type == PACKET_LOOPBACK)
1801*4882a593Smuzhiyun 		return RX_HANDLER_PASS;
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	if (skb->protocol != htons(ETH_P_MAP)) {
1804*4882a593Smuzhiyun 		WARN_ON(1);
1805*4882a593Smuzhiyun 		return RX_HANDLER_PASS;
1806*4882a593Smuzhiyun 	}
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	dev = netdev_priv(skb->dev);
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	if (dev == NULL) {
1811*4882a593Smuzhiyun 		WARN_ON(1);
1812*4882a593Smuzhiyun 		return RX_HANDLER_PASS;
1813*4882a593Smuzhiyun 	}
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	_rmnet_usb_rx_handler(dev, skb);
1816*4882a593Smuzhiyun 	consume_skb(skb);
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun done:
1819*4882a593Smuzhiyun 	return RX_HANDLER_CONSUMED;
1820*4882a593Smuzhiyun }
1821*4882a593Smuzhiyun #endif
1822*4882a593Smuzhiyun #endif
1823*4882a593Smuzhiyun #endif
1824*4882a593Smuzhiyun /*===========================================================================
1825*4882a593Smuzhiyun METHOD:
1826*4882a593Smuzhiyun    GobiNetDriverTxFixup (Public Method)
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun DESCRIPTION:
1829*4882a593Smuzhiyun    Handling data format mode on transmit path
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun PARAMETERS
1832*4882a593Smuzhiyun    pDev           [ I ] - Pointer to usbnet device
1833*4882a593Smuzhiyun    pSKB           [ I ] - Pointer to transmit packet buffer
1834*4882a593Smuzhiyun    flags          [ I ] - os flags
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun RETURN VALUE:
1837*4882a593Smuzhiyun    None
1838*4882a593Smuzhiyun ===========================================================================*/
1839*4882a593Smuzhiyun static struct sk_buff *GobiNetDriverTxFixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
1840*4882a593Smuzhiyun {
1841*4882a593Smuzhiyun 	sGobiUSBNet * pGobiDev = (sGobiUSBNet *)dev->data[0];
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 	if (!pGobiDev) {
1844*4882a593Smuzhiyun 		DBG( "failed to get QMIDevice\n" );
1845*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
1846*4882a593Smuzhiyun 		return NULL;
1847*4882a593Smuzhiyun 	}
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 	if (unlikely(!skb)) {
1850*4882a593Smuzhiyun 		return NULL;
1851*4882a593Smuzhiyun 	}
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun 	if (!pGobiDev->mbRawIPMode)
1854*4882a593Smuzhiyun 		return skb;
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun #ifdef QUECTEL_WWAN_QMAP
1857*4882a593Smuzhiyun 	if (pGobiDev->qmap_mode > 1) {
1858*4882a593Smuzhiyun 		if (skb->protocol == htons(ETH_P_MAP))
1859*4882a593Smuzhiyun 			return skb;
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 		goto drop_skb;
1862*4882a593Smuzhiyun 	}
1863*4882a593Smuzhiyun 	else if (pGobiDev->qmap_mode == 1) {
1864*4882a593Smuzhiyun 		if (unlikely(!pGobiDev->link_state)) {
1865*4882a593Smuzhiyun 			dev_info(&dev->net->dev, "link_state 0x%x, drop skb, len = %u\n", pGobiDev->link_state, skb->len);
1866*4882a593Smuzhiyun 			goto drop_skb;
1867*4882a593Smuzhiyun 		}
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun 		if (dev->net->type == ARPHRD_ETHER) {
1870*4882a593Smuzhiyun #ifdef QUECTEL_BRIDGE_MODE
1871*4882a593Smuzhiyun 			if (pGobiDev->m_bridge_mode && bridge_mode_tx_fixup(dev->net, skb, pGobiDev->m_bridge_ipv4, pGobiDev->mHostMAC) == NULL) {
1872*4882a593Smuzhiyun 				goto drop_skb;
1873*4882a593Smuzhiyun 			}
1874*4882a593Smuzhiyun #endif
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 			if (ether_to_ip_fixup(dev->net, skb) == NULL)
1877*4882a593Smuzhiyun 				goto drop_skb;
1878*4882a593Smuzhiyun 		}
1879*4882a593Smuzhiyun 
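		/* qmap_version 5 prepends the basic MAP header, while 9 also adds
		   the MAPv5 checksum-offload header (built by add_qhdr() and
		   add_qhdr_v5() respectively); other versions are not handled and
		   the packet is dropped. */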
1880*4882a593Smuzhiyun 		if (pGobiDev->qmap_version == 5) {
1881*4882a593Smuzhiyun 			add_qhdr(skb, QUECTEL_QMAP_MUX_ID);
1882*4882a593Smuzhiyun 		}
1883*4882a593Smuzhiyun 		else if (pGobiDev->qmap_version == 9) {
1884*4882a593Smuzhiyun 			add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID);
1885*4882a593Smuzhiyun 		}
1886*4882a593Smuzhiyun 		else {
1887*4882a593Smuzhiyun 			goto drop_skb;
1888*4882a593Smuzhiyun 		}
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 		return skb;
1891*4882a593Smuzhiyun 	}
1892*4882a593Smuzhiyun #endif
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun #ifdef QUECTEL_BRIDGE_MODE
1895*4882a593Smuzhiyun 	if (pGobiDev->m_bridge_mode && bridge_mode_tx_fixup(dev->net, skb, pGobiDev->m_bridge_ipv4, pGobiDev->mHostMAC) == NULL) {
1896*4882a593Smuzhiyun 		goto drop_skb;
1897*4882a593Smuzhiyun 	}
1898*4882a593Smuzhiyun #endif
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun     // Skip Ethernet header from message
1901*4882a593Smuzhiyun 	if (likely(ether_to_ip_fixup(dev->net, skb))) {
1902*4882a593Smuzhiyun 		return skb;
1903*4882a593Smuzhiyun 	}
1904*4882a593Smuzhiyun 	else {
1905*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,22 ))
1906*4882a593Smuzhiyun 		dev_err(&dev->intf->dev,  "Packet Dropped ");
1907*4882a593Smuzhiyun #elif (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
1908*4882a593Smuzhiyun 		dev_err(dev->net->dev.parent,  "Packet Dropped ");
1909*4882a593Smuzhiyun #else
1910*4882a593Smuzhiyun 		INFO("Packet Dropped ");
1911*4882a593Smuzhiyun #endif
1912*4882a593Smuzhiyun 	}
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP)
1915*4882a593Smuzhiyun drop_skb:
1916*4882a593Smuzhiyun #endif
1917*4882a593Smuzhiyun #if (LINUX_VERSION_CODE <= KERNEL_VERSION( 2,6,24 )) && defined(CONFIG_X86_32)
1918*4882a593Smuzhiyun 	INFO("dev_kfree_skb_any() will make kernel panic on CentOS!\n");
1919*4882a593Smuzhiyun 	quec_debug=1;PrintHex(skb->data, 32);quec_debug=0;
1920*4882a593Smuzhiyun #else
1921*4882a593Smuzhiyun 	// Filter the packet out, release it
1922*4882a593Smuzhiyun 	dev_kfree_skb_any(skb);
1923*4882a593Smuzhiyun #endif
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 	return NULL;
1926*4882a593Smuzhiyun }
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_MULTI_PACKAGES)
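/*
 * Split an aggregated rx URB back into individual IP packets: each packet is
 * preceded by a quec_net_package_header that gives its payload length; every
 * inner packet except the last is cloned and returned separately through
 * usbnet_skb_return(), while the last one is handled in place.
 */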
1929*4882a593Smuzhiyun static int GobiNetDriverRxPktsFixup(struct usbnet *dev, struct sk_buff *skb)
1930*4882a593Smuzhiyun {
1931*4882a593Smuzhiyun     sGobiUSBNet * pGobiDev = (sGobiUSBNet *)dev->data[0];
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun     if (!pGobiDev->mbRawIPMode)
1934*4882a593Smuzhiyun         return 1;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun     /* This check is no longer done by usbnet */
1937*4882a593Smuzhiyun     if (skb->len < dev->net->hard_header_len)
1938*4882a593Smuzhiyun         return 0;
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun     if (!rx_packets) {
1941*4882a593Smuzhiyun         return GobiNetDriverRxFixup(dev, skb);
1942*4882a593Smuzhiyun     }
1943*4882a593Smuzhiyun 
1944*4882a593Smuzhiyun     while (likely(skb->len)) {
1945*4882a593Smuzhiyun         struct sk_buff* new_skb;
1946*4882a593Smuzhiyun         struct quec_net_package_header package_header;
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun         if (skb->len < sizeof(package_header))
1949*4882a593Smuzhiyun             return 0;
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun         memcpy(&package_header, skb->data, sizeof(package_header));
1952*4882a593Smuzhiyun         package_header.payload_len = be16_to_cpu(package_header.payload_len);
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun         if (package_header.msg_spec != QUEC_NET_MSG_SPEC || package_header.msg_id != QUEC_NET_MSG_ID_IP_DATA)
1955*4882a593Smuzhiyun             return 0;
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun         if (skb->len < (package_header.payload_len + sizeof(package_header)))
1958*4882a593Smuzhiyun             return 0;
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun         skb_pull(skb, sizeof(package_header));
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun         if (skb->len == package_header.payload_len)
1963*4882a593Smuzhiyun             return GobiNetDriverRxFixup(dev, skb);
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun         new_skb = skb_clone(skb, GFP_ATOMIC);
1966*4882a593Smuzhiyun         if (new_skb) {
1967*4882a593Smuzhiyun             skb_trim(new_skb, package_header.payload_len);
1968*4882a593Smuzhiyun             if (GobiNetDriverRxFixup(dev, new_skb))
1969*4882a593Smuzhiyun                 usbnet_skb_return(dev, new_skb);
1970*4882a593Smuzhiyun             else
1971*4882a593Smuzhiyun                 return 0;
1972*4882a593Smuzhiyun         }
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun         skb_pull(skb, package_header.payload_len);
1975*4882a593Smuzhiyun     }
1976*4882a593Smuzhiyun 
1977*4882a593Smuzhiyun     return 0;
1978*4882a593Smuzhiyun }
1979*4882a593Smuzhiyun #endif
1980*4882a593Smuzhiyun 
1981*4882a593Smuzhiyun #ifdef QUECTEL_WWAN_QMAP
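/*
 * On kernels with netdev rx handlers, prepend a fake Ethernet header with
 * protocol ETH_P_MAP so that usbnet_skb_return()/eth_type_trans() deliver the
 * aggregated MAP frame to rmnet_usb_rx_handler(); on older kernels (or when
 * no rx handler is installed) the frame is de-aggregated inline instead.
 */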
1982*4882a593Smuzhiyun static int GobiNetDriverRxQmapFixup(struct usbnet *dev, struct sk_buff *skb)
1983*4882a593Smuzhiyun {
1984*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,35 )) //ab95bfe01f9872459c8678572ccadbf646badad0
1985*4882a593Smuzhiyun 	rx_handler_func_t *rx_handler;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,3,1 )) //7bdd402706cf26bfef9050dfee3f229b7f33ee4f
1988*4882a593Smuzhiyun 	if (skb->dev == NULL) {
1989*4882a593Smuzhiyun 		skb->dev = dev->net;
1990*4882a593Smuzhiyun 	}
1991*4882a593Smuzhiyun #endif
1992*4882a593Smuzhiyun 	rx_handler = rcu_dereference(skb->dev->rx_handler);
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun 	if (rx_handler == rmnet_usb_rx_handler) {
1995*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,3,1 )) //7bdd402706cf26bfef9050dfee3f229b7f33ee4f
1996*4882a593Smuzhiyun 		unsigned headroom = skb_headroom(skb);
1997*4882a593Smuzhiyun 		if (headroom < ETH_HLEN) {
1998*4882a593Smuzhiyun 			unsigned tailroom = skb_tailroom(skb);
1999*4882a593Smuzhiyun 			if ((tailroom + headroom) >= ETH_HLEN) {
2000*4882a593Smuzhiyun 				unsigned moveroom = ETH_HLEN - headroom;
2001*4882a593Smuzhiyun 				memmove(skb->data + moveroom, skb->data, skb->len);
2002*4882a593Smuzhiyun 				skb->data += moveroom;
2003*4882a593Smuzhiyun 				skb->tail += moveroom;
2004*4882a593Smuzhiyun 				#ifdef WARN_ONCE
2005*4882a593Smuzhiyun 				WARN_ONCE(1, "It is better reserve headroom in usbnet.c:rx_submit()!\n");
2006*4882a593Smuzhiyun 				#endif
2007*4882a593Smuzhiyun 			}
2008*4882a593Smuzhiyun 		}
2009*4882a593Smuzhiyun #endif
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 		if (dev->net->type == ARPHRD_ETHER && skb_headroom(skb) >= ETH_HLEN) {
2012*4882a593Smuzhiyun 			//usbnet.c rx_process() usbnet_skb_return() eth_type_trans()
2013*4882a593Smuzhiyun 			skb_push(skb, ETH_HLEN);
2014*4882a593Smuzhiyun 			skb_reset_mac_header(skb);
2015*4882a593Smuzhiyun 			memcpy(eth_hdr(skb)->h_source, default_modem_addr, ETH_ALEN);
2016*4882a593Smuzhiyun 			memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
2017*4882a593Smuzhiyun 			eth_hdr(skb)->h_proto = htons(ETH_P_MAP);
2018*4882a593Smuzhiyun 
2019*4882a593Smuzhiyun 			return 1;
2020*4882a593Smuzhiyun 		}
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun #ifdef WARN_ONCE
2023*4882a593Smuzhiyun 		WARN_ONCE(1, "skb_headroom < ETH_HLEN\n");
2024*4882a593Smuzhiyun #endif
2025*4882a593Smuzhiyun 		return 0;
2026*4882a593Smuzhiyun 	}
2027*4882a593Smuzhiyun #endif
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 	_rmnet_usb_rx_handler(dev, skb);
2030*4882a593Smuzhiyun 	return 0;
2031*4882a593Smuzhiyun }
2032*4882a593Smuzhiyun #endif
2033*4882a593Smuzhiyun /*===========================================================================
2034*4882a593Smuzhiyun METHOD:
2035*4882a593Smuzhiyun    GobiNetDriverRxFixup (Public Method)
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun DESCRIPTION:
2038*4882a593Smuzhiyun    Handling data format mode on receive path
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun PARAMETERS
2041*4882a593Smuzhiyun    pDev           [ I ] - Pointer to usbnet device
2042*4882a593Smuzhiyun    pSKB           [ I ] - Pointer to received packet buffer
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun RETURN VALUE:
2045*4882a593Smuzhiyun    None
2046*4882a593Smuzhiyun ===========================================================================*/
2047*4882a593Smuzhiyun static int GobiNetDriverRxFixup(struct usbnet *dev, struct sk_buff *skb)
2048*4882a593Smuzhiyun {
2049*4882a593Smuzhiyun     __be16 proto;
2050*4882a593Smuzhiyun     sGobiUSBNet * pGobiDev = (sGobiUSBNet *)dev->data[0];
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun     if (!pGobiDev->mbRawIPMode)
2053*4882a593Smuzhiyun         return 1;
2054*4882a593Smuzhiyun 
2055*4882a593Smuzhiyun     /* This check is no longer done by usbnet */
2056*4882a593Smuzhiyun     if (skb->len < dev->net->hard_header_len)
2057*4882a593Smuzhiyun         return 0;
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun #ifdef QUECTEL_WWAN_QMAP
2060*4882a593Smuzhiyun     if (pGobiDev->qmap_mode) {
2061*4882a593Smuzhiyun         return GobiNetDriverRxQmapFixup(dev, skb);
2062*4882a593Smuzhiyun     }
2063*4882a593Smuzhiyun #endif
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun     switch (skb->data[0] & 0xf0) {
2066*4882a593Smuzhiyun     case 0x40:
2067*4882a593Smuzhiyun     	proto = htons(ETH_P_IP);
2068*4882a593Smuzhiyun     	break;
2069*4882a593Smuzhiyun     case 0x60:
2070*4882a593Smuzhiyun     	proto = htons(ETH_P_IPV6);
2071*4882a593Smuzhiyun     	break;
2072*4882a593Smuzhiyun     case 0x00:
2073*4882a593Smuzhiyun     	if (is_multicast_ether_addr(skb->data))
2074*4882a593Smuzhiyun     		return 1;
2075*4882a593Smuzhiyun     	/* possibly bogus destination - rewrite just in case */
2076*4882a593Smuzhiyun     	skb_reset_mac_header(skb);
2077*4882a593Smuzhiyun     	goto fix_dest;
2078*4882a593Smuzhiyun     default:
2079*4882a593Smuzhiyun     	/* pass along other packets without modifications */
2080*4882a593Smuzhiyun     	return 1;
2081*4882a593Smuzhiyun     }
2082*4882a593Smuzhiyun     if (skb_headroom(skb) < ETH_HLEN && pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
2083*4882a593Smuzhiyun         DBG("%s: couldn't pskb_expand_head\n", __func__);
2084*4882a593Smuzhiyun         return 0;
2085*4882a593Smuzhiyun     }
2086*4882a593Smuzhiyun     skb_push(skb, ETH_HLEN);
2087*4882a593Smuzhiyun     skb_reset_mac_header(skb);
2088*4882a593Smuzhiyun     eth_hdr(skb)->h_proto = proto;
2089*4882a593Smuzhiyun     memcpy(eth_hdr(skb)->h_source, ec20_mac, ETH_ALEN);
2090*4882a593Smuzhiyun fix_dest:
2091*4882a593Smuzhiyun #ifdef QUECTEL_BRIDGE_MODE
2092*4882a593Smuzhiyun 	bridge_mode_rx_fixup(pGobiDev, dev->net, skb);
2093*4882a593Smuzhiyun #else
2094*4882a593Smuzhiyun 	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
2095*4882a593Smuzhiyun #endif
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun #ifdef QUECTEL_BRIDGE_MODE
2098*4882a593Smuzhiyun #if 0
2099*4882a593Smuzhiyun     if (pGobiDev->m_bridge_mode) {
2100*4882a593Smuzhiyun         struct ethhdr *ehdr = eth_hdr(skb);
2101*4882a593Smuzhiyun quec_debug = 1;
2102*4882a593Smuzhiyun         DBG(": ");
2103*4882a593Smuzhiyun         PrintHex(ehdr, sizeof(struct ethhdr));
2104*4882a593Smuzhiyun quec_debug = 0;
2105*4882a593Smuzhiyun     }
2106*4882a593Smuzhiyun #endif
2107*4882a593Smuzhiyun #endif
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun     return 1;
2110*4882a593Smuzhiyun }
2111*4882a593Smuzhiyun #endif
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2114*4882a593Smuzhiyun #ifdef CONFIG_PM
2115*4882a593Smuzhiyun /*===========================================================================
2116*4882a593Smuzhiyun METHOD:
2117*4882a593Smuzhiyun    GobiUSBNetURBCallback (Public Method)
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun DESCRIPTION:
2120*4882a593Smuzhiyun    Write is complete, cleanup and signal that we're ready for next packet
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun PARAMETERS
2123*4882a593Smuzhiyun    pURB     [ I ] - Pointer to sAutoPM struct
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun RETURN VALUE:
2126*4882a593Smuzhiyun    None
2127*4882a593Smuzhiyun ===========================================================================*/
2128*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
2129*4882a593Smuzhiyun void GobiUSBNetURBCallback( struct urb * pURB )
2130*4882a593Smuzhiyun #else
2131*4882a593Smuzhiyun void GobiUSBNetURBCallback(struct urb *pURB, struct pt_regs *regs)
2132*4882a593Smuzhiyun #endif
2133*4882a593Smuzhiyun {
2134*4882a593Smuzhiyun    unsigned long activeURBflags;
2135*4882a593Smuzhiyun    sAutoPM * pAutoPM = (sAutoPM *)pURB->context;
2136*4882a593Smuzhiyun    if (pAutoPM == NULL)
2137*4882a593Smuzhiyun    {
2138*4882a593Smuzhiyun       // Should never happen
2139*4882a593Smuzhiyun       DBG( "bad context\n" );
2140*4882a593Smuzhiyun       return;
2141*4882a593Smuzhiyun    }
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun    if (pURB->status != 0)
2144*4882a593Smuzhiyun    {
2145*4882a593Smuzhiyun       // Note that in case of an error, the behaviour is no different
2146*4882a593Smuzhiyun       DBG( "urb finished with error %d\n", pURB->status );
2147*4882a593Smuzhiyun    }
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun    // Remove activeURB (memory to be freed later)
2150*4882a593Smuzhiyun    spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun    // EAGAIN used to signify callback is done
2153*4882a593Smuzhiyun    pAutoPM->mpActiveURB = ERR_PTR( -EAGAIN );
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun    spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun    complete( &pAutoPM->mThreadDoWork );
2158*4882a593Smuzhiyun 
2159*4882a593Smuzhiyun #ifdef URB_FREE_BUFFER_BY_SELF
2160*4882a593Smuzhiyun     if (pURB->transfer_flags & URB_FREE_BUFFER)
2161*4882a593Smuzhiyun         kfree(pURB->transfer_buffer);
2162*4882a593Smuzhiyun #endif
2163*4882a593Smuzhiyun    usb_free_urb( pURB );
2164*4882a593Smuzhiyun }
2165*4882a593Smuzhiyun 
2166*4882a593Smuzhiyun /*===========================================================================
2167*4882a593Smuzhiyun METHOD:
2168*4882a593Smuzhiyun    GobiUSBNetTXTimeout (Public Method)
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun DESCRIPTION:
2171*4882a593Smuzhiyun    Timeout declared by the net driver.  Stop all transfers
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun PARAMETERS
2174*4882a593Smuzhiyun    pNet     [ I ] - Pointer to net device
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun RETURN VALUE:
2177*4882a593Smuzhiyun    None
2178*4882a593Smuzhiyun ===========================================================================*/
2179*4882a593Smuzhiyun void GobiUSBNetTXTimeout( struct net_device * pNet )
2180*4882a593Smuzhiyun {
2181*4882a593Smuzhiyun    struct sGobiUSBNet * pGobiDev;
2182*4882a593Smuzhiyun    sAutoPM * pAutoPM;
2183*4882a593Smuzhiyun    sURBList * pURBListEntry;
2184*4882a593Smuzhiyun    unsigned long activeURBflags, URBListFlags;
2185*4882a593Smuzhiyun    struct usbnet * pDev = netdev_priv( pNet );
2186*4882a593Smuzhiyun    struct urb * pURB;
2187*4882a593Smuzhiyun 
2188*4882a593Smuzhiyun    if (pDev == NULL || pDev->net == NULL)
2189*4882a593Smuzhiyun    {
2190*4882a593Smuzhiyun       DBG( "failed to get usbnet device\n" );
2191*4882a593Smuzhiyun       return;
2192*4882a593Smuzhiyun    }
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2195*4882a593Smuzhiyun    if (pGobiDev == NULL)
2196*4882a593Smuzhiyun    {
2197*4882a593Smuzhiyun       DBG( "failed to get QMIDevice\n" );
2198*4882a593Smuzhiyun       return;
2199*4882a593Smuzhiyun    }
2200*4882a593Smuzhiyun    pAutoPM = &pGobiDev->mAutoPM;
2201*4882a593Smuzhiyun 
2202*4882a593Smuzhiyun    DBG( "\n" );
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun    // Grab a pointer to active URB
2205*4882a593Smuzhiyun    spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2206*4882a593Smuzhiyun    pURB = pAutoPM->mpActiveURB;
2207*4882a593Smuzhiyun    spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2208*4882a593Smuzhiyun    // Stop active URB
2209*4882a593Smuzhiyun    if (pURB != NULL)
2210*4882a593Smuzhiyun    {
2211*4882a593Smuzhiyun       usb_kill_urb( pURB );
2212*4882a593Smuzhiyun    }
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun    // Cleanup URB List
2215*4882a593Smuzhiyun    spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun    pURBListEntry = pAutoPM->mpURBList;
2218*4882a593Smuzhiyun    while (pURBListEntry != NULL)
2219*4882a593Smuzhiyun    {
2220*4882a593Smuzhiyun       pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
2221*4882a593Smuzhiyun       atomic_dec( &pAutoPM->mURBListLen );
2222*4882a593Smuzhiyun       usb_free_urb( pURBListEntry->mpURB );
2223*4882a593Smuzhiyun       kfree( pURBListEntry );
2224*4882a593Smuzhiyun       pURBListEntry = pAutoPM->mpURBList;
2225*4882a593Smuzhiyun    }
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun    spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun    complete( &pAutoPM->mThreadDoWork );
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun    return;
2232*4882a593Smuzhiyun }
2233*4882a593Smuzhiyun 
2234*4882a593Smuzhiyun /*===========================================================================
2235*4882a593Smuzhiyun METHOD:
2236*4882a593Smuzhiyun    GobiUSBNetAutoPMThread (Public Method)
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun DESCRIPTION:
2239*4882a593Smuzhiyun    Handle device Auto PM state asynchronously
2240*4882a593Smuzhiyun    Handle network packet transmission asynchronously
2241*4882a593Smuzhiyun 
2242*4882a593Smuzhiyun PARAMETERS
2243*4882a593Smuzhiyun    pData     [ I ] - Pointer to sAutoPM struct
2244*4882a593Smuzhiyun 
2245*4882a593Smuzhiyun RETURN VALUE:
2246*4882a593Smuzhiyun    int - 0 for success
2247*4882a593Smuzhiyun          Negative errno for error
2248*4882a593Smuzhiyun ===========================================================================*/
2249*4882a593Smuzhiyun static int GobiUSBNetAutoPMThread( void * pData )
2250*4882a593Smuzhiyun {
2251*4882a593Smuzhiyun    unsigned long activeURBflags, URBListFlags;
2252*4882a593Smuzhiyun    sURBList * pURBListEntry;
2253*4882a593Smuzhiyun    int status;
2254*4882a593Smuzhiyun    struct usb_device * pUdev;
2255*4882a593Smuzhiyun    sAutoPM * pAutoPM = (sAutoPM *)pData;
2256*4882a593Smuzhiyun    struct urb * pURB;
2257*4882a593Smuzhiyun 
2258*4882a593Smuzhiyun    if (pAutoPM == NULL)
2259*4882a593Smuzhiyun    {
2260*4882a593Smuzhiyun       DBG( "passed null pointer\n" );
2261*4882a593Smuzhiyun       return -EINVAL;
2262*4882a593Smuzhiyun    }
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun    pUdev = interface_to_usbdev( pAutoPM->mpIntf );
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun    DBG( "traffic thread started\n" );
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun    while (pAutoPM->mbExit == false)
2269*4882a593Smuzhiyun    {
2270*4882a593Smuzhiyun       // Wait for someone to poke us
2271*4882a593Smuzhiyun       wait_for_completion_interruptible( &pAutoPM->mThreadDoWork );
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun       // Time to exit?
2274*4882a593Smuzhiyun       if (pAutoPM->mbExit == true)
2275*4882a593Smuzhiyun       {
2276*4882a593Smuzhiyun          // Stop activeURB
2277*4882a593Smuzhiyun          spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2278*4882a593Smuzhiyun          pURB = pAutoPM->mpActiveURB;
2279*4882a593Smuzhiyun          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2280*4882a593Smuzhiyun 
2281*4882a593Smuzhiyun          // EAGAIN used to signify callback is done
2282*4882a593Smuzhiyun          if (IS_ERR( pAutoPM->mpActiveURB )
2283*4882a593Smuzhiyun                  &&  PTR_ERR( pAutoPM->mpActiveURB ) == -EAGAIN )
2284*4882a593Smuzhiyun          {
2285*4882a593Smuzhiyun              pURB = NULL;
2286*4882a593Smuzhiyun          }
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun          if (pURB != NULL)
2289*4882a593Smuzhiyun          {
2290*4882a593Smuzhiyun             usb_kill_urb( pURB );
2291*4882a593Smuzhiyun          }
2292*4882a593Smuzhiyun          // Will be freed in callback function
2293*4882a593Smuzhiyun 
2294*4882a593Smuzhiyun          // Cleanup URB List
2295*4882a593Smuzhiyun          spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun          pURBListEntry = pAutoPM->mpURBList;
2298*4882a593Smuzhiyun          while (pURBListEntry != NULL)
2299*4882a593Smuzhiyun          {
2300*4882a593Smuzhiyun             pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
2301*4882a593Smuzhiyun             atomic_dec( &pAutoPM->mURBListLen );
2302*4882a593Smuzhiyun             usb_free_urb( pURBListEntry->mpURB );
2303*4882a593Smuzhiyun             kfree( pURBListEntry );
2304*4882a593Smuzhiyun             pURBListEntry = pAutoPM->mpURBList;
2305*4882a593Smuzhiyun          }
2306*4882a593Smuzhiyun 
2307*4882a593Smuzhiyun          spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun          break;
2310*4882a593Smuzhiyun       }
2311*4882a593Smuzhiyun 
2312*4882a593Smuzhiyun       // Is our URB active?
2313*4882a593Smuzhiyun       spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun       // EAGAIN used to signify callback is done
2316*4882a593Smuzhiyun       if (IS_ERR( pAutoPM->mpActiveURB )
2317*4882a593Smuzhiyun       &&  PTR_ERR( pAutoPM->mpActiveURB ) == -EAGAIN )
2318*4882a593Smuzhiyun       {
2319*4882a593Smuzhiyun          pAutoPM->mpActiveURB = NULL;
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun          // Restore IRQs so task can sleep
2322*4882a593Smuzhiyun          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2323*4882a593Smuzhiyun 
2324*4882a593Smuzhiyun          // URB is done, decrement the Auto PM usage count
2325*4882a593Smuzhiyun          usb_autopm_put_interface( pAutoPM->mpIntf );
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun          // Lock ActiveURB again
2328*4882a593Smuzhiyun          spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2329*4882a593Smuzhiyun       }
2330*4882a593Smuzhiyun 
2331*4882a593Smuzhiyun       if (pAutoPM->mpActiveURB != NULL)
2332*4882a593Smuzhiyun       {
2333*4882a593Smuzhiyun          // There is already a URB active, go back to sleep
2334*4882a593Smuzhiyun          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2335*4882a593Smuzhiyun          continue;
2336*4882a593Smuzhiyun       }
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun       // Is there a URB waiting to be submitted?
2339*4882a593Smuzhiyun       spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2340*4882a593Smuzhiyun       if (pAutoPM->mpURBList == NULL)
2341*4882a593Smuzhiyun       {
2342*4882a593Smuzhiyun          // No more URBs to submit, go back to sleep
2343*4882a593Smuzhiyun          spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2344*4882a593Smuzhiyun          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2345*4882a593Smuzhiyun          continue;
2346*4882a593Smuzhiyun       }
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun       // Pop an element
2349*4882a593Smuzhiyun       pURBListEntry = pAutoPM->mpURBList;
2350*4882a593Smuzhiyun       pAutoPM->mpURBList = pAutoPM->mpURBList->mpNext;
2351*4882a593Smuzhiyun       atomic_dec( &pAutoPM->mURBListLen );
2352*4882a593Smuzhiyun       spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun       // Set ActiveURB
2355*4882a593Smuzhiyun       pAutoPM->mpActiveURB = pURBListEntry->mpURB;
2356*4882a593Smuzhiyun       spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun       // Tell autopm core we need device woken up
2359*4882a593Smuzhiyun       status = usb_autopm_get_interface( pAutoPM->mpIntf );
2360*4882a593Smuzhiyun       if (status < 0)
2361*4882a593Smuzhiyun       {
2362*4882a593Smuzhiyun          DBG( "unable to autoresume interface: %d\n", status );
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun          // likely caused by device going from autosuspend -> full suspend
2365*4882a593Smuzhiyun          if (status == -EPERM)
2366*4882a593Smuzhiyun          {
2367*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
2368*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
2369*4882a593Smuzhiyun             pUdev->auto_pm = 0;
2370*4882a593Smuzhiyun #else
2371*4882a593Smuzhiyun              pUdev = pUdev; // no-op: avoids an unused-variable warning when auto_pm is unavailable
2372*4882a593Smuzhiyun #endif
2373*4882a593Smuzhiyun #endif
2374*4882a593Smuzhiyun             GobiNetSuspend( pAutoPM->mpIntf, PMSG_SUSPEND );
2375*4882a593Smuzhiyun          }
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun          // Add pURBListEntry back onto pAutoPM->mpURBList
2378*4882a593Smuzhiyun          spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2379*4882a593Smuzhiyun          pURBListEntry->mpNext = pAutoPM->mpURBList;
2380*4882a593Smuzhiyun          pAutoPM->mpURBList = pURBListEntry;
2381*4882a593Smuzhiyun          atomic_inc( &pAutoPM->mURBListLen );
2382*4882a593Smuzhiyun          spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun          spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2385*4882a593Smuzhiyun          pAutoPM->mpActiveURB = NULL;
2386*4882a593Smuzhiyun          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2387*4882a593Smuzhiyun 
2388*4882a593Smuzhiyun          // Go back to sleep
2389*4882a593Smuzhiyun          continue;
2390*4882a593Smuzhiyun       }
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun       // Submit URB
2393*4882a593Smuzhiyun       status = usb_submit_urb( pAutoPM->mpActiveURB, GFP_KERNEL );
2394*4882a593Smuzhiyun       if (status < 0)
2395*4882a593Smuzhiyun       {
2396*4882a593Smuzhiyun          // Could happen for a number of reasons
2397*4882a593Smuzhiyun          DBG( "Failed to submit URB: %d.  Packet dropped\n", status );
2398*4882a593Smuzhiyun          spin_lock_irqsave( &pAutoPM->mActiveURBLock, activeURBflags );
2399*4882a593Smuzhiyun          usb_free_urb( pAutoPM->mpActiveURB );
2400*4882a593Smuzhiyun          pAutoPM->mpActiveURB = NULL;
2401*4882a593Smuzhiyun          spin_unlock_irqrestore( &pAutoPM->mActiveURBLock, activeURBflags );
2402*4882a593Smuzhiyun          usb_autopm_put_interface( pAutoPM->mpIntf );
2403*4882a593Smuzhiyun 
2404*4882a593Smuzhiyun          // Loop again
2405*4882a593Smuzhiyun          complete( &pAutoPM->mThreadDoWork );
2406*4882a593Smuzhiyun       }
2407*4882a593Smuzhiyun 
2408*4882a593Smuzhiyun       kfree( pURBListEntry );
2409*4882a593Smuzhiyun    }
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun    DBG( "traffic thread exiting\n" );
2412*4882a593Smuzhiyun    pAutoPM->mpThread = NULL;
2413*4882a593Smuzhiyun    return 0;
2414*4882a593Smuzhiyun }
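/*
 * Summary of the AutoPM hand-off (descriptive only, no functional change):
 * mpActiveURB is the single in-flight TX URB and is always updated under
 * mActiveURBLock.  Its value encodes the state of the pipeline:
 *
 *    NULL               - nothing in flight, the thread may submit the next URB
 *    valid struct urb * - URB submitted, waiting for GobiUSBNetURBCallback
 *    ERR_PTR(-EAGAIN)   - callback finished; the thread drops the autopm
 *                         reference and pops the next entry from mpURBList
 *
 * Every producer (GobiUSBNetStartXmit, the URB callback, the TX timeout) wakes
 * the thread with complete( &pAutoPM->mThreadDoWork ) after changing state.
 */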
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun /*===========================================================================
2417*4882a593Smuzhiyun METHOD:
2418*4882a593Smuzhiyun    GobiUSBNetStartXmit (Public Method)
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun DESCRIPTION:
2421*4882a593Smuzhiyun    Convert sk_buff to usb URB and queue for transmit
2422*4882a593Smuzhiyun 
2423*4882a593Smuzhiyun PARAMETERS
2424*4882a593Smuzhiyun    pNet     [ I ] - Pointer to net device
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun RETURN VALUE:
2427*4882a593Smuzhiyun    NETDEV_TX_OK on success
2428*4882a593Smuzhiyun    NETDEV_TX_BUSY on error
2429*4882a593Smuzhiyun ===========================================================================*/
2430*4882a593Smuzhiyun int GobiUSBNetStartXmit(
2431*4882a593Smuzhiyun    struct sk_buff *     pSKB,
2432*4882a593Smuzhiyun    struct net_device *  pNet )
2433*4882a593Smuzhiyun {
2434*4882a593Smuzhiyun    unsigned long URBListFlags;
2435*4882a593Smuzhiyun    struct sGobiUSBNet * pGobiDev;
2436*4882a593Smuzhiyun    sAutoPM * pAutoPM;
2437*4882a593Smuzhiyun    sURBList * pURBListEntry, ** ppURBListEnd;
2438*4882a593Smuzhiyun    void * pURBData;
2439*4882a593Smuzhiyun    struct usbnet * pDev = netdev_priv( pNet );
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun    //DBG( "\n" );
2442*4882a593Smuzhiyun 
2443*4882a593Smuzhiyun    if (pDev == NULL || pDev->net == NULL)
2444*4882a593Smuzhiyun    {
2445*4882a593Smuzhiyun       DBG( "failed to get usbnet device\n" );
2446*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2447*4882a593Smuzhiyun    }
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2450*4882a593Smuzhiyun    if (pGobiDev == NULL)
2451*4882a593Smuzhiyun    {
2452*4882a593Smuzhiyun       DBG( "failed to get QMIDevice\n" );
2453*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2454*4882a593Smuzhiyun    }
2455*4882a593Smuzhiyun    pAutoPM = &pGobiDev->mAutoPM;
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun    if( NULL == pSKB )
2458*4882a593Smuzhiyun    {
2459*4882a593Smuzhiyun        DBG( "Buffer is NULL \n" );
2460*4882a593Smuzhiyun        return NETDEV_TX_BUSY;
2461*4882a593Smuzhiyun    }
2462*4882a593Smuzhiyun 
2463*4882a593Smuzhiyun    if (GobiTestDownReason( pGobiDev, DRIVER_SUSPENDED ))
2464*4882a593Smuzhiyun    {
2465*4882a593Smuzhiyun       // Should not happen
2466*4882a593Smuzhiyun       DBG( "device is suspended\n" );
2467*4882a593Smuzhiyun       dump_stack();
2468*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2469*4882a593Smuzhiyun    }
2470*4882a593Smuzhiyun 
2471*4882a593Smuzhiyun    if (GobiTestDownReason( pGobiDev, NO_NDIS_CONNECTION ))
2472*4882a593Smuzhiyun    {
2473*4882a593Smuzhiyun       //netif_carrier_off( pGobiDev->mpNetDev->net );
2474*4882a593Smuzhiyun       //DBG( "device is disconnected\n" );
2475*4882a593Smuzhiyun       //dump_stack();
2476*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2477*4882a593Smuzhiyun    }
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun    // Convert the sk_buff into a URB
2480*4882a593Smuzhiyun 
2481*4882a593Smuzhiyun    // Check if buffer is full
2482*4882a593Smuzhiyun    if ( atomic_read( &pAutoPM->mURBListLen ) >= txQueueLength)
2483*4882a593Smuzhiyun    {
2484*4882a593Smuzhiyun       DBG( "not scheduling request, buffer is full\n" );
2485*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2486*4882a593Smuzhiyun    }
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun    // Allocate URBListEntry
2489*4882a593Smuzhiyun    pURBListEntry = kmalloc( sizeof( sURBList ), GFP_ATOMIC );
2490*4882a593Smuzhiyun    if (pURBListEntry == NULL)
2491*4882a593Smuzhiyun    {
2492*4882a593Smuzhiyun       DBG( "unable to allocate URBList memory\n" );
2493*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2494*4882a593Smuzhiyun    }
2495*4882a593Smuzhiyun    pURBListEntry->mpNext = NULL;
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun    // Allocate URB
2498*4882a593Smuzhiyun    pURBListEntry->mpURB = usb_alloc_urb( 0, GFP_ATOMIC );
2499*4882a593Smuzhiyun    if (pURBListEntry->mpURB == NULL)
2500*4882a593Smuzhiyun    {
2501*4882a593Smuzhiyun       DBG( "unable to allocate URB\n" );
2502*4882a593Smuzhiyun       // release all memory allocated so far
2503*4882a593Smuzhiyun       if (pURBListEntry)
2504*4882a593Smuzhiyun          kfree( pURBListEntry );
2505*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2506*4882a593Smuzhiyun    }
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun #if 1 //def DATA_MODE_RP
2509*4882a593Smuzhiyun    GobiNetDriverTxFixup(pDev, pSKB, GFP_ATOMIC);
2510*4882a593Smuzhiyun #endif
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun    // Allocate URB transfer_buffer
2513*4882a593Smuzhiyun    pURBData = kmalloc( pSKB->len, GFP_ATOMIC );
2514*4882a593Smuzhiyun    if (pURBData == NULL)
2515*4882a593Smuzhiyun    {
2516*4882a593Smuzhiyun       DBG( "unable to allocate URB data\n" );
2517*4882a593Smuzhiyun       // release all memory allocated so far
2518*4882a593Smuzhiyun       if (pURBListEntry)
2519*4882a593Smuzhiyun       {
2520*4882a593Smuzhiyun          usb_free_urb( pURBListEntry->mpURB );
2521*4882a593Smuzhiyun          kfree( pURBListEntry );
2522*4882a593Smuzhiyun       }
2523*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2524*4882a593Smuzhiyun    }
2525*4882a593Smuzhiyun    // Fill with SKB's data
2526*4882a593Smuzhiyun    memcpy( pURBData, pSKB->data, pSKB->len );
2527*4882a593Smuzhiyun 
2528*4882a593Smuzhiyun    usb_fill_bulk_urb( pURBListEntry->mpURB,
2529*4882a593Smuzhiyun                       pGobiDev->mpNetDev->udev,
2530*4882a593Smuzhiyun                       pGobiDev->mpNetDev->out,
2531*4882a593Smuzhiyun                       pURBData,
2532*4882a593Smuzhiyun                       pSKB->len,
2533*4882a593Smuzhiyun                       GobiUSBNetURBCallback,
2534*4882a593Smuzhiyun                       pAutoPM );
2535*4882a593Smuzhiyun 
2536*4882a593Smuzhiyun    /* Handle the need to send a zero length packet and release the
2537*4882a593Smuzhiyun     * transfer buffer
2538*4882a593Smuzhiyun     */
2539*4882a593Smuzhiyun     pURBListEntry->mpURB->transfer_flags |= (URB_ZERO_PACKET | URB_FREE_BUFFER);
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun    // Acquire lock on URBList
2542*4882a593Smuzhiyun    spin_lock_irqsave( &pAutoPM->mURBListLock, URBListFlags );
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun    // Add URB to end of list
2545*4882a593Smuzhiyun    ppURBListEnd = &pAutoPM->mpURBList;
2546*4882a593Smuzhiyun    while ((*ppURBListEnd) != NULL)
2547*4882a593Smuzhiyun    {
2548*4882a593Smuzhiyun       ppURBListEnd = &(*ppURBListEnd)->mpNext;
2549*4882a593Smuzhiyun    }
2550*4882a593Smuzhiyun    *ppURBListEnd = pURBListEntry;
2551*4882a593Smuzhiyun    atomic_inc( &pAutoPM->mURBListLen );
2552*4882a593Smuzhiyun 
2553*4882a593Smuzhiyun    spin_unlock_irqrestore( &pAutoPM->mURBListLock, URBListFlags );
2554*4882a593Smuzhiyun 
2555*4882a593Smuzhiyun    complete( &pAutoPM->mThreadDoWork );
2556*4882a593Smuzhiyun 
2557*4882a593Smuzhiyun    // Start transfer timer
2558*4882a593Smuzhiyun    pNet->trans_start = jiffies;
2559*4882a593Smuzhiyun    // Free SKB
2560*4882a593Smuzhiyun    if (pSKB)
2561*4882a593Smuzhiyun       dev_kfree_skb_any( pSKB );
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun    return NETDEV_TX_OK;
2564*4882a593Smuzhiyun }
2565*4882a593Smuzhiyun #endif
2566*4882a593Smuzhiyun static int (*local_usbnet_start_xmit) (struct sk_buff *skb, struct net_device *net);
2567*4882a593Smuzhiyun #endif
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun static int GobiUSBNetStartXmit2( struct sk_buff *pSKB, struct net_device *pNet ){
2570*4882a593Smuzhiyun    struct sGobiUSBNet * pGobiDev;
2571*4882a593Smuzhiyun    struct usbnet * pDev = netdev_priv( pNet );
2572*4882a593Smuzhiyun 
2573*4882a593Smuzhiyun    //DBG( "\n" );
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun    if (pDev == NULL || pDev->net == NULL)
2576*4882a593Smuzhiyun    {
2577*4882a593Smuzhiyun       DBG( "failed to get usbnet device\n" );
2578*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2579*4882a593Smuzhiyun    }
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2582*4882a593Smuzhiyun    if (pGobiDev == NULL)
2583*4882a593Smuzhiyun    {
2584*4882a593Smuzhiyun       DBG( "failed to get QMIDevice\n" );
2585*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2586*4882a593Smuzhiyun    }
2587*4882a593Smuzhiyun 
2588*4882a593Smuzhiyun    if( NULL == pSKB )
2589*4882a593Smuzhiyun    {
2590*4882a593Smuzhiyun        DBG( "Buffer is NULL \n" );
2591*4882a593Smuzhiyun        return NETDEV_TX_BUSY;
2592*4882a593Smuzhiyun    }
2593*4882a593Smuzhiyun 
2594*4882a593Smuzhiyun    if (GobiTestDownReason( pGobiDev, DRIVER_SUSPENDED ))
2595*4882a593Smuzhiyun    {
2596*4882a593Smuzhiyun       // Should not happen
2597*4882a593Smuzhiyun       DBG( "device is suspended\n" );
2598*4882a593Smuzhiyun       dump_stack();
2599*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2600*4882a593Smuzhiyun    }
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun    if (GobiTestDownReason( pGobiDev, NO_NDIS_CONNECTION ))
2603*4882a593Smuzhiyun    {
2604*4882a593Smuzhiyun       //netif_carrier_off( pGobiDev->mpNetDev->net );
2605*4882a593Smuzhiyun       //DBG( "device is disconnected\n" );
2606*4882a593Smuzhiyun       //dump_stack();
2607*4882a593Smuzhiyun       return NETDEV_TX_BUSY;
2608*4882a593Smuzhiyun    }
2609*4882a593Smuzhiyun 
2610*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2611*4882a593Smuzhiyun    return local_usbnet_start_xmit(pSKB, pNet);
2612*4882a593Smuzhiyun #else
2613*4882a593Smuzhiyun    return usbnet_start_xmit(pSKB, pNet);
2614*4882a593Smuzhiyun #endif
2615*4882a593Smuzhiyun }
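/*
 * Illustrative note (assumption about intent, not new behaviour): on kernels
 * that use net_device_ops, GobiUSBNetProbe() points ndo_start_xmit at
 * GobiUSBNetStartXmit2() above, which only refuses traffic while
 * DRIVER_SUSPENDED or NO_NDIS_CONNECTION is set and otherwise defers to the
 * stock usbnet_start_xmit().  The AutoPM queueing path
 * (GobiUSBNetStartXmit/GobiUSBNetAutoPMThread) is only used on the older
 * CONFIG_PM builds that override hard_start_xmit directly.
 */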
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun /*===========================================================================
2618*4882a593Smuzhiyun METHOD:
2619*4882a593Smuzhiyun    GobiUSBNetOpen (Public Method)
2620*4882a593Smuzhiyun 
2621*4882a593Smuzhiyun DESCRIPTION:
2622*4882a593Smuzhiyun    Wrapper to usbnet_open, correctly handling autosuspend
2623*4882a593Smuzhiyun    Start AutoPM thread (if CONFIG_PM is defined)
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun PARAMETERS
2626*4882a593Smuzhiyun    pNet     [ I ] - Pointer to net device
2627*4882a593Smuzhiyun 
2628*4882a593Smuzhiyun RETURN VALUE:
2629*4882a593Smuzhiyun    int - 0 for success
2630*4882a593Smuzhiyun          Negative errno for error
2631*4882a593Smuzhiyun ===========================================================================*/
2632*4882a593Smuzhiyun static int GobiUSBNetOpen( struct net_device * pNet )
2633*4882a593Smuzhiyun {
2634*4882a593Smuzhiyun    int status = 0;
2635*4882a593Smuzhiyun    struct sGobiUSBNet * pGobiDev;
2636*4882a593Smuzhiyun    struct usbnet * pDev = netdev_priv( pNet );
2637*4882a593Smuzhiyun 
2638*4882a593Smuzhiyun    if (pDev == NULL)
2639*4882a593Smuzhiyun    {
2640*4882a593Smuzhiyun       DBG( "failed to get usbnet device\n" );
2641*4882a593Smuzhiyun       return -ENXIO;
2642*4882a593Smuzhiyun    }
2643*4882a593Smuzhiyun 
2644*4882a593Smuzhiyun    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2645*4882a593Smuzhiyun    if (pGobiDev == NULL)
2646*4882a593Smuzhiyun    {
2647*4882a593Smuzhiyun       DBG( "failed to get QMIDevice\n" );
2648*4882a593Smuzhiyun       return -ENXIO;
2649*4882a593Smuzhiyun    }
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun    DBG( "\n" );
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun #ifdef CONFIG_PM
2654*4882a593Smuzhiyun    #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2655*4882a593Smuzhiyun    // Start the AutoPM thread
2656*4882a593Smuzhiyun    pGobiDev->mAutoPM.mpIntf = pGobiDev->mpIntf;
2657*4882a593Smuzhiyun    pGobiDev->mAutoPM.mbExit = false;
2658*4882a593Smuzhiyun    pGobiDev->mAutoPM.mpURBList = NULL;
2659*4882a593Smuzhiyun    pGobiDev->mAutoPM.mpActiveURB = NULL;
2660*4882a593Smuzhiyun    spin_lock_init( &pGobiDev->mAutoPM.mURBListLock );
2661*4882a593Smuzhiyun    spin_lock_init( &pGobiDev->mAutoPM.mActiveURBLock );
2662*4882a593Smuzhiyun    atomic_set( &pGobiDev->mAutoPM.mURBListLen, 0 );
2663*4882a593Smuzhiyun    init_completion( &pGobiDev->mAutoPM.mThreadDoWork );
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun    pGobiDev->mAutoPM.mpThread = kthread_run( GobiUSBNetAutoPMThread,
2666*4882a593Smuzhiyun                                                &pGobiDev->mAutoPM,
2667*4882a593Smuzhiyun                                                "GobiUSBNetAutoPMThread" );
2668*4882a593Smuzhiyun    if (IS_ERR( pGobiDev->mAutoPM.mpThread ))
2669*4882a593Smuzhiyun    {
2670*4882a593Smuzhiyun       DBG( "AutoPM thread creation error\n" );
2671*4882a593Smuzhiyun       return PTR_ERR( pGobiDev->mAutoPM.mpThread );
2672*4882a593Smuzhiyun    }
2673*4882a593Smuzhiyun    #endif
2674*4882a593Smuzhiyun #endif /* CONFIG_PM */
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun    // Allow traffic
2677*4882a593Smuzhiyun    GobiClearDownReason( pGobiDev, NET_IFACE_STOPPED );
2678*4882a593Smuzhiyun 
2679*4882a593Smuzhiyun    // Pass to usbnet_open if defined
2680*4882a593Smuzhiyun    if (pGobiDev->mpUSBNetOpen != NULL)
2681*4882a593Smuzhiyun    {
2682*4882a593Smuzhiyun       status = pGobiDev->mpUSBNetOpen( pNet );
2683*4882a593Smuzhiyun #ifdef CONFIG_PM
2684*4882a593Smuzhiyun       // If usbnet_open was successful enable Auto PM
2685*4882a593Smuzhiyun       if (status == 0)
2686*4882a593Smuzhiyun       {
2687*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,33 ))
2688*4882a593Smuzhiyun          usb_autopm_enable( pGobiDev->mpIntf );
2689*4882a593Smuzhiyun #else
2690*4882a593Smuzhiyun          usb_autopm_put_interface( pGobiDev->mpIntf );
2691*4882a593Smuzhiyun #endif
2692*4882a593Smuzhiyun       }
2693*4882a593Smuzhiyun #endif /* CONFIG_PM */
2694*4882a593Smuzhiyun    }
2695*4882a593Smuzhiyun    else
2696*4882a593Smuzhiyun    {
2697*4882a593Smuzhiyun       DBG( "no USBNetOpen defined\n" );
2698*4882a593Smuzhiyun    }
2699*4882a593Smuzhiyun 
2700*4882a593Smuzhiyun    return status;
2701*4882a593Smuzhiyun }
2702*4882a593Smuzhiyun 
2703*4882a593Smuzhiyun /*===========================================================================
2704*4882a593Smuzhiyun METHOD:
2705*4882a593Smuzhiyun    GobiUSBNetStop (Public Method)
2706*4882a593Smuzhiyun 
2707*4882a593Smuzhiyun DESCRIPTION:
2708*4882a593Smuzhiyun    Wrapper to usbnet_stop, correctly handling autosuspend
2709*4882a593Smuzhiyun    Stop AutoPM thread (if CONFIG_PM is defined)
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun PARAMETERS
2712*4882a593Smuzhiyun    pNet     [ I ] - Pointer to net device
2713*4882a593Smuzhiyun 
2714*4882a593Smuzhiyun RETURN VALUE:
2715*4882a593Smuzhiyun    int - 0 for success
2716*4882a593Smuzhiyun          Negative errno for error
2717*4882a593Smuzhiyun ===========================================================================*/
2718*4882a593Smuzhiyun static int GobiUSBNetStop( struct net_device * pNet )
2719*4882a593Smuzhiyun {
2720*4882a593Smuzhiyun    struct sGobiUSBNet * pGobiDev;
2721*4882a593Smuzhiyun    struct usbnet * pDev = netdev_priv( pNet );
2722*4882a593Smuzhiyun 
2723*4882a593Smuzhiyun    if (pDev == NULL || pDev->net == NULL)
2724*4882a593Smuzhiyun    {
2725*4882a593Smuzhiyun       DBG( "failed to get netdevice\n" );
2726*4882a593Smuzhiyun       return -ENXIO;
2727*4882a593Smuzhiyun    }
2728*4882a593Smuzhiyun 
2729*4882a593Smuzhiyun    pGobiDev = (sGobiUSBNet *)pDev->data[0];
2730*4882a593Smuzhiyun    if (pGobiDev == NULL)
2731*4882a593Smuzhiyun    {
2732*4882a593Smuzhiyun       DBG( "failed to get QMIDevice\n" );
2733*4882a593Smuzhiyun       return -ENXIO;
2734*4882a593Smuzhiyun    }
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun    // Stop traffic
2737*4882a593Smuzhiyun    GobiSetDownReason( pGobiDev, NET_IFACE_STOPPED );
2738*4882a593Smuzhiyun 
2739*4882a593Smuzhiyun #ifdef CONFIG_PM
2740*4882a593Smuzhiyun    #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2741*4882a593Smuzhiyun    // Tell traffic thread to exit
2742*4882a593Smuzhiyun    pGobiDev->mAutoPM.mbExit = true;
2743*4882a593Smuzhiyun    complete( &pGobiDev->mAutoPM.mThreadDoWork );
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun    // Wait for it to exit
2746*4882a593Smuzhiyun    while( pGobiDev->mAutoPM.mpThread != NULL )
2747*4882a593Smuzhiyun    {
2748*4882a593Smuzhiyun       msleep( 100 );
2749*4882a593Smuzhiyun    }
2750*4882a593Smuzhiyun    DBG( "thread stopped\n" );
2751*4882a593Smuzhiyun    #endif
2752*4882a593Smuzhiyun #endif /* CONFIG_PM */
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun    // Pass to usbnet_stop, if defined
2755*4882a593Smuzhiyun    if (pGobiDev->mpUSBNetStop != NULL)
2756*4882a593Smuzhiyun    {
2757*4882a593Smuzhiyun       return pGobiDev->mpUSBNetStop( pNet );
2758*4882a593Smuzhiyun    }
2759*4882a593Smuzhiyun    else
2760*4882a593Smuzhiyun    {
2761*4882a593Smuzhiyun       return 0;
2762*4882a593Smuzhiyun    }
2763*4882a593Smuzhiyun }
2764*4882a593Smuzhiyun 
2765*4882a593Smuzhiyun static int GobiNetDriver_check_connect(struct usbnet *pDev) {
2766*4882a593Smuzhiyun    int status = 0;
2767*4882a593Smuzhiyun    struct sGobiUSBNet * pGobiDev = NULL;
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun    while (status++ < 10) {
2770*4882a593Smuzhiyun       pGobiDev = (sGobiUSBNet *)pDev->data[0];
2771*4882a593Smuzhiyun       if (pGobiDev && pGobiDev->mbProbeDone)
2772*4882a593Smuzhiyun          break;
2773*4882a593Smuzhiyun       msleep(1);
2774*4882a593Smuzhiyun    }
2775*4882a593Smuzhiyun 
2776*4882a593Smuzhiyun    return 0;
2777*4882a593Smuzhiyun }
2778*4882a593Smuzhiyun 
2779*4882a593Smuzhiyun /*=========================================================================*/
2780*4882a593Smuzhiyun // Struct driver_info
2781*4882a593Smuzhiyun /*=========================================================================*/
2782*4882a593Smuzhiyun static struct driver_info GobiNetInfo =
2783*4882a593Smuzhiyun {
2784*4882a593Smuzhiyun    .description   = "GobiNet Ethernet Device",
2785*4882a593Smuzhiyun #if 1//def CONFIG_ANDROID
2786*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP) && defined(FLAG_RX_ASSEMBLE)
2787*4882a593Smuzhiyun 	.flags         = FLAG_RX_ASSEMBLE, //usb0
2788*4882a593Smuzhiyun #endif
2789*4882a593Smuzhiyun #else
2790*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP) && defined(FLAG_RX_ASSEMBLE)
2791*4882a593Smuzhiyun 	.flags         = FLAG_ETHER | FLAG_RX_ASSEMBLE,
2792*4882a593Smuzhiyun #else
2793*4882a593Smuzhiyun 	.flags         = FLAG_ETHER,
2794*4882a593Smuzhiyun #endif
2795*4882a593Smuzhiyun #endif
2796*4882a593Smuzhiyun    .bind          = GobiNetDriverBind,
2797*4882a593Smuzhiyun    .unbind        = GobiNetDriverUnbind,
2798*4882a593Smuzhiyun #if 1 //def DATA_MODE_RP
2799*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_MULTI_PACKAGES)
2800*4882a593Smuzhiyun    .rx_fixup      = GobiNetDriverRxPktsFixup,
2801*4882a593Smuzhiyun #else
2802*4882a593Smuzhiyun    .rx_fixup      = GobiNetDriverRxFixup,
2803*4882a593Smuzhiyun #endif
2804*4882a593Smuzhiyun    .tx_fixup      = GobiNetDriverTxFixup,
2805*4882a593Smuzhiyun #endif
2806*4882a593Smuzhiyun    .check_connect = GobiNetDriver_check_connect,
2807*4882a593Smuzhiyun    .data          = (1 << 4),
2808*4882a593Smuzhiyun };
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun /*=========================================================================*/
2811*4882a593Smuzhiyun // Qualcomm Gobi 3000 VID/PIDs
2812*4882a593Smuzhiyun /*=========================================================================*/
2813*4882a593Smuzhiyun #define GOBI_FIXED_INTF(vend, prod) \
2814*4882a593Smuzhiyun     { \
2815*4882a593Smuzhiyun           USB_DEVICE( vend, prod ), \
2816*4882a593Smuzhiyun           .driver_info = (unsigned long)&GobiNetInfo, \
2817*4882a593Smuzhiyun     }
2818*4882a593Smuzhiyun static const struct usb_device_id QuecGobiVIDPIDTable [] =
2819*4882a593Smuzhiyun {
2820*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x05c6, 0x9003 ), // Quectel UC20
2821*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x05c6, 0x9215 ), // Quectel EC20 (MDM9215)
2822*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0125 ), // Quectel EC20 (MDM9X07)/EC25/EG25
2823*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0121 ), // Quectel EC21
2824*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0306 ), // Quectel EP06
2825*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0435 ), // Quectel AG35
2826*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0296 ), // Quectel BG96
2827*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0191 ), // Quectel EG91
2828*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0195 ), // Quectel EG95
2829*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0512 ), // Quectel EG12/EP12/EM12/EG16/EG18,SDx20
2830*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0620 ), // Quectel EG20,SDx24
2831*4882a593Smuzhiyun     GOBI_FIXED_INTF( 0x2c7c, 0x0800 ), // Quectel RG500Q,RM500Q,RM510Q,SDX55
2832*4882a593Smuzhiyun    //Terminating entry
2833*4882a593Smuzhiyun    { }
2834*4882a593Smuzhiyun };
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun MODULE_DEVICE_TABLE( usb, QuecGobiVIDPIDTable );
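/*
 * Example (hypothetical entry, for illustration only): adding support for a
 * new modem is just one more GOBI_FIXED_INTF() line before the terminating
 * entry, e.g.
 *
 *    GOBI_FIXED_INTF( 0x2c7c, 0x1234 ),  // hypothetical VID/PID
 *
 * MODULE_DEVICE_TABLE() exports the table so udev/modprobe can autoload the
 * driver when a matching VID/PID enumerates.
 */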
2837*4882a593Smuzhiyun 
2838*4882a593Smuzhiyun /*===========================================================================
2839*4882a593Smuzhiyun METHOD:
2840*4882a593Smuzhiyun    GobiUSBNetProbe (Public Method)
2841*4882a593Smuzhiyun 
2842*4882a593Smuzhiyun DESCRIPTION:
2843*4882a593Smuzhiyun    Run usbnet_probe
2844*4882a593Smuzhiyun    Set up QMI device
2845*4882a593Smuzhiyun 
2846*4882a593Smuzhiyun PARAMETERS
2847*4882a593Smuzhiyun    pIntf        [ I ] - Pointer to interface
2848*4882a593Smuzhiyun    pVIDPIDs     [ I ] - Pointer to VID/PID table
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun RETURN VALUE:
2851*4882a593Smuzhiyun    int - 0 for success
2852*4882a593Smuzhiyun          Negative errno for error
2853*4882a593Smuzhiyun ===========================================================================*/
2854*4882a593Smuzhiyun static int GobiUSBNetProbe(
2855*4882a593Smuzhiyun    struct usb_interface *        pIntf,
2856*4882a593Smuzhiyun    const struct usb_device_id *  pVIDPIDs )
2857*4882a593Smuzhiyun {
2858*4882a593Smuzhiyun    int status;
2859*4882a593Smuzhiyun    struct usbnet * pDev;
2860*4882a593Smuzhiyun    sGobiUSBNet * pGobiDev;
2861*4882a593Smuzhiyun #if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,29 ))
2862*4882a593Smuzhiyun    struct net_device_ops * pNetDevOps;
2863*4882a593Smuzhiyun #endif
2864*4882a593Smuzhiyun 
2865*4882a593Smuzhiyun    status = usbnet_probe( pIntf, pVIDPIDs );
2866*4882a593Smuzhiyun    if (status < 0)
2867*4882a593Smuzhiyun    {
2868*4882a593Smuzhiyun       DBG( "usbnet_probe failed %d\n", status );
2869*4882a593Smuzhiyun       return status;
2870*4882a593Smuzhiyun    }
2871*4882a593Smuzhiyun 
2872*4882a593Smuzhiyun #if (LINUX_VERSION_CODE >= KERNEL_VERSION( 2,6,19 ))
2873*4882a593Smuzhiyun    pIntf->needs_remote_wakeup = 1;
2874*4882a593Smuzhiyun #endif
2875*4882a593Smuzhiyun 
2876*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,23 ))
2877*4882a593Smuzhiyun    pDev = usb_get_intfdata( pIntf );
2878*4882a593Smuzhiyun #else
2879*4882a593Smuzhiyun    pDev = (struct usbnet *)pIntf->dev.platform_data;
2880*4882a593Smuzhiyun #endif
2881*4882a593Smuzhiyun 
2882*4882a593Smuzhiyun    if (pDev == NULL || pDev->net == NULL)
2883*4882a593Smuzhiyun    {
2884*4882a593Smuzhiyun       DBG( "failed to get netdevice\n" );
2885*4882a593Smuzhiyun       usbnet_disconnect( pIntf );
2886*4882a593Smuzhiyun       return -ENXIO;
2887*4882a593Smuzhiyun    }
2888*4882a593Smuzhiyun 
2889*4882a593Smuzhiyun    pGobiDev = kzalloc( sizeof( sGobiUSBNet ), GFP_KERNEL );
2890*4882a593Smuzhiyun    if (pGobiDev == NULL)
2891*4882a593Smuzhiyun    {
2892*4882a593Smuzhiyun       DBG( "failed to allocate device buffers\n" );
2893*4882a593Smuzhiyun       usbnet_disconnect( pIntf );
2894*4882a593Smuzhiyun       return -ENOMEM;
2895*4882a593Smuzhiyun    }
2896*4882a593Smuzhiyun 
2897*4882a593Smuzhiyun    atomic_set(&pGobiDev->refcount, 1);
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun    pDev->data[0] = (unsigned long)pGobiDev;
2900*4882a593Smuzhiyun 
2901*4882a593Smuzhiyun    pGobiDev->mpNetDev = pDev;
2902*4882a593Smuzhiyun 
2903*4882a593Smuzhiyun    // Clearing endpoint halt is a magic handshake that brings
2904*4882a593Smuzhiyun    // the device out of low power (airplane) mode
2905*4882a593Smuzhiyun    usb_clear_halt( pGobiDev->mpNetDev->udev, pDev->out );
2906*4882a593Smuzhiyun 
2907*4882a593Smuzhiyun    // Overload PM related network functions
2908*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2909*4882a593Smuzhiyun    pGobiDev->mpUSBNetOpen = pDev->net->open;
2910*4882a593Smuzhiyun    pDev->net->open = GobiUSBNetOpen;
2911*4882a593Smuzhiyun    pGobiDev->mpUSBNetStop = pDev->net->stop;
2912*4882a593Smuzhiyun    pDev->net->stop = GobiUSBNetStop;
2913*4882a593Smuzhiyun #if defined(CONFIG_PM) && (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,14 ))
2914*4882a593Smuzhiyun    pDev->net->hard_start_xmit = GobiUSBNetStartXmit;
2915*4882a593Smuzhiyun    pDev->net->tx_timeout = GobiUSBNetTXTimeout;
2916*4882a593Smuzhiyun #else  // Quectel: do not send DHCP requests before the NDIS connection is up (UC20)
2917*4882a593Smuzhiyun     local_usbnet_start_xmit = pDev->net->hard_start_xmit;
2918*4882a593Smuzhiyun     pDev->net->hard_start_xmit = GobiUSBNetStartXmit2;
2919*4882a593Smuzhiyun #endif
2920*4882a593Smuzhiyun #else
2921*4882a593Smuzhiyun    pNetDevOps = kmalloc( sizeof( struct net_device_ops ), GFP_KERNEL );
2922*4882a593Smuzhiyun    if (pNetDevOps == NULL)
2923*4882a593Smuzhiyun    {
2924*4882a593Smuzhiyun       DBG( "failed to allocate net device ops\n" );
2925*4882a593Smuzhiyun       usbnet_disconnect( pIntf );
2926*4882a593Smuzhiyun       return -ENOMEM;
2927*4882a593Smuzhiyun    }
2928*4882a593Smuzhiyun    memcpy( pNetDevOps, pDev->net->netdev_ops, sizeof( struct net_device_ops ) );
2929*4882a593Smuzhiyun 
2930*4882a593Smuzhiyun    pGobiDev->mpUSBNetOpen = pNetDevOps->ndo_open;
2931*4882a593Smuzhiyun    pNetDevOps->ndo_open = GobiUSBNetOpen;
2932*4882a593Smuzhiyun    pGobiDev->mpUSBNetStop = pNetDevOps->ndo_stop;
2933*4882a593Smuzhiyun    pNetDevOps->ndo_stop = GobiUSBNetStop;
2934*4882a593Smuzhiyun #if 1 // Quectel: do not send DHCP requests before the NDIS connection is up (UC20)
2935*4882a593Smuzhiyun    pNetDevOps->ndo_start_xmit = GobiUSBNetStartXmit2;
2936*4882a593Smuzhiyun #else
2937*4882a593Smuzhiyun    pNetDevOps->ndo_start_xmit = usbnet_start_xmit;
2938*4882a593Smuzhiyun #endif
2939*4882a593Smuzhiyun    pNetDevOps->ndo_tx_timeout = usbnet_tx_timeout;
2940*4882a593Smuzhiyun 
2941*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP)
2942*4882a593Smuzhiyun    pNetDevOps->ndo_do_ioctl = qmap_ndo_do_ioctl;
2943*4882a593Smuzhiyun #endif
2944*4882a593Smuzhiyun 
2945*4882a593Smuzhiyun    pDev->net->netdev_ops = pNetDevOps;
2946*4882a593Smuzhiyun #endif
2947*4882a593Smuzhiyun 
2948*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,31 ))
2949*4882a593Smuzhiyun    memset( &(pGobiDev->mpNetDev->stats), 0, sizeof( struct net_device_stats ) );
2950*4882a593Smuzhiyun #else
2951*4882a593Smuzhiyun    memset( &(pGobiDev->mpNetDev->net->stats), 0, sizeof( struct net_device_stats ) );
2952*4882a593Smuzhiyun #endif
2953*4882a593Smuzhiyun 
2954*4882a593Smuzhiyun    pGobiDev->mpIntf = pIntf;
2955*4882a593Smuzhiyun    memset( &(pGobiDev->mMEID), '0', 14 );
2956*4882a593Smuzhiyun 
2957*4882a593Smuzhiyun    DBG( "Mac Address:\n" );
2958*4882a593Smuzhiyun    PrintHex( &pGobiDev->mpNetDev->net->dev_addr[0], 6 );
2959*4882a593Smuzhiyun 
2960*4882a593Smuzhiyun    pGobiDev->mbQMIValid = false;
2961*4882a593Smuzhiyun    memset( &pGobiDev->mQMIDev, 0, sizeof( sQMIDev ) );
2962*4882a593Smuzhiyun    pGobiDev->mQMIDev.mbCdevIsInitialized = false;
2963*4882a593Smuzhiyun 
2964*4882a593Smuzhiyun    pGobiDev->mQMIDev.mpDevClass = gpClass;
2965*4882a593Smuzhiyun 
2966*4882a593Smuzhiyun #ifdef CONFIG_PM
2967*4882a593Smuzhiyun    #if (LINUX_VERSION_CODE < KERNEL_VERSION( 2,6,29 ))
2968*4882a593Smuzhiyun    init_completion( &pGobiDev->mAutoPM.mThreadDoWork );
2969*4882a593Smuzhiyun    #endif
2970*4882a593Smuzhiyun #endif /* CONFIG_PM */
2971*4882a593Smuzhiyun    spin_lock_init( &pGobiDev->mQMIDev.mClientMemLock );
2972*4882a593Smuzhiyun 
2973*4882a593Smuzhiyun    // Default to device down
2974*4882a593Smuzhiyun    pGobiDev->mDownReason = 0;
2975*4882a593Smuzhiyun 
2976*4882a593Smuzhiyun //#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,11,0 ))
2977*4882a593Smuzhiyun    GobiSetDownReason( pGobiDev, NO_NDIS_CONNECTION );
2978*4882a593Smuzhiyun    GobiSetDownReason( pGobiDev, NET_IFACE_STOPPED );
2979*4882a593Smuzhiyun //#endif
2980*4882a593Smuzhiyun 
2981*4882a593Smuzhiyun    // Register QMI
2982*4882a593Smuzhiyun    pGobiDev->mbMdm9x07 |= (pDev->udev->descriptor.idVendor == cpu_to_le16(0x2c7c));
2983*4882a593Smuzhiyun    pGobiDev->mbMdm9x06 |= (pDev->udev->descriptor.idVendor == cpu_to_le16(0x2c7c) && pDev->udev->descriptor.idProduct == cpu_to_le16(0x0296));
2984*4882a593Smuzhiyun    pGobiDev->mbRawIPMode = pGobiDev->mbMdm9x07;
2985*4882a593Smuzhiyun    if ( pGobiDev->mbRawIPMode)
2986*4882a593Smuzhiyun       pGobiDev->mpNetDev->net->flags |= IFF_NOARP;
2987*4882a593Smuzhiyun #ifdef QUECTEL_BRIDGE_MODE
2988*4882a593Smuzhiyun    memcpy(pGobiDev->mHostMAC, pDev->net->dev_addr, 6);
2989*4882a593Smuzhiyun    pGobiDev->m_bridge_mode = bridge_mode;
2990*4882a593Smuzhiyun #endif
2991*4882a593Smuzhiyun 
2992*4882a593Smuzhiyun #ifdef QUECTEL_REMOVE_TX_ZLP
2993*4882a593Smuzhiyun 	{
2994*4882a593Smuzhiyun 		struct remove_tx_zlp_config {
2995*4882a593Smuzhiyun 			__le32 enable;
2996*4882a593Smuzhiyun 		} __packed;
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun 		struct remove_tx_zlp_config cfg;
2999*4882a593Smuzhiyun 		cfg.enable = cpu_to_le32(1);  //1-enable  0-disable
3000*4882a593Smuzhiyun 
3001*4882a593Smuzhiyun 		usb_control_msg(
3002*4882a593Smuzhiyun 			interface_to_usbdev(pIntf),
3003*4882a593Smuzhiyun 			usb_sndctrlpipe(interface_to_usbdev(pIntf), 0),
3004*4882a593Smuzhiyun 			USB_CDC_SET_REMOVE_TX_ZLP_COMMAND,
3005*4882a593Smuzhiyun 			0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
3006*4882a593Smuzhiyun 			0,
3007*4882a593Smuzhiyun 			pIntf->cur_altsetting->desc.bInterfaceNumber,
3008*4882a593Smuzhiyun 			&cfg, sizeof(cfg), 100);
3009*4882a593Smuzhiyun 	}
3010*4882a593Smuzhiyun #endif
3011*4882a593Smuzhiyun 
3012*4882a593Smuzhiyun 	pGobiDev->m_qcrmcall_mode = qcrmcall_mode;
3013*4882a593Smuzhiyun 
3014*4882a593Smuzhiyun 	if (pGobiDev->m_qcrmcall_mode) {
3015*4882a593Smuzhiyun 		INFO("AT$QCRMCALL MODE!");
3016*4882a593Smuzhiyun 
3017*4882a593Smuzhiyun 		GobiClearDownReason( pGobiDev, NO_NDIS_CONNECTION );
3018*4882a593Smuzhiyun 		usb_control_msg(
3019*4882a593Smuzhiyun 			interface_to_usbdev(pIntf),
3020*4882a593Smuzhiyun 			usb_sndctrlpipe(interface_to_usbdev(pIntf), 0),
3021*4882a593Smuzhiyun 			0x22, //USB_CDC_REQ_SET_CONTROL_LINE_STATE
3022*4882a593Smuzhiyun 			0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
3023*4882a593Smuzhiyun 			1, //active CDC DTR
3024*4882a593Smuzhiyun 			pIntf->cur_altsetting->desc.bInterfaceNumber,
3025*4882a593Smuzhiyun 			NULL, 0, 100);
3026*4882a593Smuzhiyun 		status = 0;
3027*4882a593Smuzhiyun 	}
3028*4882a593Smuzhiyun 	else {
3029*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP)
3030*4882a593Smuzhiyun 		if (pGobiDev->mbRawIPMode) {
3031*4882a593Smuzhiyun 			unsigned idProduct = le16_to_cpu(pDev->udev->descriptor.idProduct);
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun 			pGobiDev->qmap_mode = qmap_mode;
3034*4882a593Smuzhiyun 			if (pGobiDev->qmap_mode == 0) {
3035*4882a593Smuzhiyun 				if (idProduct == 0x0800) {
3036*4882a593Smuzhiyun 					pGobiDev->qmap_mode = 1;
3037*4882a593Smuzhiyun 				}
3038*4882a593Smuzhiyun 			}
3039*4882a593Smuzhiyun 
3040*4882a593Smuzhiyun 			pGobiDev->qmap_version = 5;
3041*4882a593Smuzhiyun 			if (idProduct == 0x0800) {
3042*4882a593Smuzhiyun 				pGobiDev->qmap_version = 9;
3043*4882a593Smuzhiyun 			}
3044*4882a593Smuzhiyun 	      }
3045*4882a593Smuzhiyun 
3046*4882a593Smuzhiyun 		if (pGobiDev->qmap_mode) {
3047*4882a593Smuzhiyun 			netif_carrier_off(pDev->net);
3048*4882a593Smuzhiyun 		}
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun 		if (pGobiDev->qmap_mode > 1) {
3051*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,35 )) //ab95bfe01f9872459c8678572ccadbf646badad0
3052*4882a593Smuzhiyun 			rtnl_lock();
3053*4882a593Smuzhiyun 			netdev_rx_handler_register(pDev->net, rmnet_usb_rx_handler, NULL);
3054*4882a593Smuzhiyun 			rtnl_unlock();
3055*4882a593Smuzhiyun #endif
3056*4882a593Smuzhiyun 		}
3057*4882a593Smuzhiyun 
3058*4882a593Smuzhiyun #if defined(QUECTEL_UL_DATA_AGG)
3059*4882a593Smuzhiyun 		if (pGobiDev->qmap_mode) {
3060*4882a593Smuzhiyun 			struct ul_agg_ctx *agg_ctx = &pGobiDev->agg_ctx;
3061*4882a593Smuzhiyun 
3062*4882a593Smuzhiyun 			agg_ctx->ul_data_aggregation_max_datagrams = 1;
3063*4882a593Smuzhiyun 			agg_ctx->ul_data_aggregation_max_size = 2048;
3064*4882a593Smuzhiyun 			agg_ctx->dl_minimum_padding = 0;
3065*4882a593Smuzhiyun 		}
3066*4882a593Smuzhiyun #endif
3067*4882a593Smuzhiyun #endif
3068*4882a593Smuzhiyun 		status = RegisterQMIDevice( pGobiDev );
3069*4882a593Smuzhiyun 	}
3070*4882a593Smuzhiyun 
3071*4882a593Smuzhiyun    if (status != 0)
3072*4882a593Smuzhiyun    {
3073*4882a593Smuzhiyun       // usbnet_disconnect() will call GobiNetDriverUnbind() which will call
3074*4882a593Smuzhiyun       // DeregisterQMIDevice() to clean up any partially created QMI device
3075*4882a593Smuzhiyun       usbnet_disconnect( pIntf );
3076*4882a593Smuzhiyun       return status;
3077*4882a593Smuzhiyun    }
3078*4882a593Smuzhiyun 
3079*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP)
3080*4882a593Smuzhiyun 	tasklet_init(&pGobiDev->txq, rmnet_usb_tx_wake_queue, (unsigned long)pGobiDev);
3081*4882a593Smuzhiyun 
3082*4882a593Smuzhiyun 	if (pGobiDev->qmap_mode > 1) {
3083*4882a593Smuzhiyun 		unsigned i;
3084*4882a593Smuzhiyun 
3085*4882a593Smuzhiyun 		for (i = 0; i < pGobiDev->qmap_mode; i++) {
3086*4882a593Smuzhiyun 			qmap_register_device(pGobiDev, i);
3087*4882a593Smuzhiyun 		}
3088*4882a593Smuzhiyun 	} else {
3089*4882a593Smuzhiyun 		pGobiDev->mpQmapNetDev[0] = pDev->net;
3090*4882a593Smuzhiyun 	}
3091*4882a593Smuzhiyun #endif
3092*4882a593Smuzhiyun 
3093*4882a593Smuzhiyun    pGobiDev->mbProbeDone = 1;
3094*4882a593Smuzhiyun    // Success
3095*4882a593Smuzhiyun    return 0;
3096*4882a593Smuzhiyun }
3097*4882a593Smuzhiyun 
3098*4882a593Smuzhiyun static void GobiUSBNetDisconnect (struct usb_interface *intf) {
3099*4882a593Smuzhiyun #if defined(QUECTEL_WWAN_QMAP)
3100*4882a593Smuzhiyun 	struct usbnet *pDev = usb_get_intfdata(intf);
3101*4882a593Smuzhiyun 	sGobiUSBNet * pGobiDev = (sGobiUSBNet *)pDev->data[0];
3102*4882a593Smuzhiyun 	unsigned i;
3103*4882a593Smuzhiyun 
3104*4882a593Smuzhiyun 	if (pGobiDev->qmap_mode > 1) {
3105*4882a593Smuzhiyun 		for (i = 0; i < pGobiDev->qmap_mode; i++) {
3106*4882a593Smuzhiyun 			qmap_unregister_device(pGobiDev, i);
3107*4882a593Smuzhiyun 		}
3108*4882a593Smuzhiyun 
3109*4882a593Smuzhiyun 	}
3110*4882a593Smuzhiyun 
3111*4882a593Smuzhiyun 	tasklet_kill(&pGobiDev->txq);
3112*4882a593Smuzhiyun #endif
3113*4882a593Smuzhiyun 
3114*4882a593Smuzhiyun 	usbnet_disconnect(intf);
3115*4882a593Smuzhiyun }
3116*4882a593Smuzhiyun 
3117*4882a593Smuzhiyun static struct usb_driver GobiNet =
3118*4882a593Smuzhiyun {
3119*4882a593Smuzhiyun    .name       = "GobiNet",
3120*4882a593Smuzhiyun    .id_table   = QuecGobiVIDPIDTable,
3121*4882a593Smuzhiyun    .probe      = GobiUSBNetProbe,
3122*4882a593Smuzhiyun    .disconnect = GobiUSBNetDisconnect,
3123*4882a593Smuzhiyun #ifdef CONFIG_PM
3124*4882a593Smuzhiyun    .suspend    = GobiNetSuspend,
3125*4882a593Smuzhiyun    .resume     = GobiNetResume,
3126*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,27 ))
3127*4882a593Smuzhiyun    .reset_resume = GobiNetResetResume,
3128*4882a593Smuzhiyun #endif
3129*4882a593Smuzhiyun #if (LINUX_VERSION_CODE > KERNEL_VERSION( 2,6,18 ))
3130*4882a593Smuzhiyun    .supports_autosuspend = true,
3131*4882a593Smuzhiyun #endif
3132*4882a593Smuzhiyun #endif /* CONFIG_PM */
3133*4882a593Smuzhiyun };
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun /*===========================================================================
3136*4882a593Smuzhiyun METHOD:
3137*4882a593Smuzhiyun    GobiUSBNetModInit (Public Method)
3138*4882a593Smuzhiyun 
3139*4882a593Smuzhiyun DESCRIPTION:
3140*4882a593Smuzhiyun    Initialize module
3141*4882a593Smuzhiyun    Create device class
3142*4882a593Smuzhiyun    Register our usb_driver struct
3143*4882a593Smuzhiyun 
3144*4882a593Smuzhiyun RETURN VALUE:
3145*4882a593Smuzhiyun    int - 0 for success
3146*4882a593Smuzhiyun          Negative errno for error
3147*4882a593Smuzhiyun ===========================================================================*/
3148*4882a593Smuzhiyun static int __init GobiUSBNetModInit( void )
3149*4882a593Smuzhiyun {
3150*4882a593Smuzhiyun    gpClass = class_create( THIS_MODULE, "GobiQMI" );
3151*4882a593Smuzhiyun    if (IS_ERR( gpClass ) == true)
3152*4882a593Smuzhiyun    {
3153*4882a593Smuzhiyun       DBG( "error at class_create %ld\n", PTR_ERR( gpClass ) );
3154*4882a593Smuzhiyun       return -ENOMEM;
3155*4882a593Smuzhiyun    }
3156*4882a593Smuzhiyun 
3157*4882a593Smuzhiyun    // This will be shown whenever driver is loaded
3158*4882a593Smuzhiyun    printk( KERN_INFO "%s: %s\n", DRIVER_DESC, DRIVER_VERSION );
3159*4882a593Smuzhiyun 
3160*4882a593Smuzhiyun    return usb_register( &GobiNet );
3161*4882a593Smuzhiyun }
3162*4882a593Smuzhiyun module_init( GobiUSBNetModInit );
3163*4882a593Smuzhiyun 
3164*4882a593Smuzhiyun /*===========================================================================
3165*4882a593Smuzhiyun METHOD:
3166*4882a593Smuzhiyun    GobiUSBNetModExit (Public Method)
3167*4882a593Smuzhiyun 
3168*4882a593Smuzhiyun DESCRIPTION:
3169*4882a593Smuzhiyun    Deregister module
3170*4882a593Smuzhiyun    Destroy device class
3171*4882a593Smuzhiyun 
3172*4882a593Smuzhiyun RETURN VALUE:
3173*4882a593Smuzhiyun    void
3174*4882a593Smuzhiyun ===========================================================================*/
3175*4882a593Smuzhiyun static void __exit GobiUSBNetModExit( void )
3176*4882a593Smuzhiyun {
3177*4882a593Smuzhiyun    usb_deregister( &GobiNet );
3178*4882a593Smuzhiyun 
3179*4882a593Smuzhiyun    class_destroy( gpClass );
3180*4882a593Smuzhiyun }
3181*4882a593Smuzhiyun module_exit( GobiUSBNetModExit );
3182*4882a593Smuzhiyun 
3183*4882a593Smuzhiyun MODULE_VERSION( DRIVER_VERSION );
3184*4882a593Smuzhiyun MODULE_AUTHOR( DRIVER_AUTHOR );
3185*4882a593Smuzhiyun MODULE_DESCRIPTION( DRIVER_DESC );
3186*4882a593Smuzhiyun MODULE_LICENSE("Dual BSD/GPL");
3187*4882a593Smuzhiyun 
3188*4882a593Smuzhiyun #ifdef bool
3189*4882a593Smuzhiyun #undef bool
3190*4882a593Smuzhiyun #endif
3191*4882a593Smuzhiyun 
3192*4882a593Smuzhiyun module_param_named( debug, quec_debug, int, S_IRUGO | S_IWUSR );
3193*4882a593Smuzhiyun MODULE_PARM_DESC( debug, "Debugging enabled or not" );
3194*4882a593Smuzhiyun 
3195*4882a593Smuzhiyun //module_param_named( interruptible, Quecinterruptible, int, S_IRUGO | S_IWUSR );
3196*4882a593Smuzhiyun //MODULE_PARM_DESC( interruptible, "Listen for and return on user interrupt" );
3197*4882a593Smuzhiyun module_param( txQueueLength, int, S_IRUGO | S_IWUSR );
3198*4882a593Smuzhiyun MODULE_PARM_DESC( txQueueLength,
3199*4882a593Smuzhiyun                   "Number of IP packets which may be queued up for transmit" );
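/*
 * Usage sketch (assumes the module is built as GobiNet.ko; adjust to your
 * build): the parameters above can be set at load time, e.g.
 *
 *    insmod GobiNet.ko debug=1 txQueueLength=100
 *
 * and, since both are registered with S_IRUGO | S_IWUSR, changed at runtime
 * through sysfs, e.g.
 *
 *    echo 0 > /sys/module/GobiNet/parameters/debug
 */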
3200*4882a593Smuzhiyun 