xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/marvell/mwifiex/uap_txrx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * NXP Wireless LAN device driver: AP TX and RX data handling
 *
 * Copyright 2011-2020 NXP
 *
 * This software file (the "File") is distributed by NXP
 * under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "main.h"
#include "wmm.h"
#include "11n_aggr.h"
#include "11n_rxreorder.h"

/* This function walks the RA lists for a TID and deletes bridged packets
 * from them until the number of pending bridged packets drops to the low
 * threshold. It returns true if any packet was deleted, false otherwise.
 */
static bool
mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
				  struct list_head *ra_list_head,
				  int tid)
{
	struct mwifiex_ra_list_tbl *ra_list;
	struct sk_buff *skb, *tmp;
	bool pkt_deleted = false;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_adapter *adapter = priv->adapter;

	list_for_each_entry(ra_list, ra_list_head, list) {
		if (skb_queue_empty(&ra_list->skb_head))
			continue;

		skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
			tx_info = MWIFIEX_SKB_TXCB(skb);
			if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
				__skb_unlink(skb, &ra_list->skb_head);
				mwifiex_write_data_complete(adapter, skb, 0,
							    -1);
				if (ra_list->tx_paused)
					priv->wmm.pkts_paused[tid]--;
				else
					atomic_dec(&priv->wmm.tx_pkts_queued);
				pkt_deleted = true;
			}
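			/* Stop once the bridged-packet backlog has drained
			 * to the low watermark.
			 */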
			if ((atomic_read(&adapter->pending_bridged_pkts) <=
					     MWIFIEX_BRIDGED_PKTS_THR_LOW))
				break;
		}
	}

	return pkt_deleted;
}

/* This function deletes packets from a particular RA list. The index of the
 * RA list from which packets were deleted is preserved, so a subsequent call
 * deletes packets from the next RA list, maintaining fairness.
 */
static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
{
	struct list_head *ra_list;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
		if (priv->del_list_idx == MAX_NUM_TID)
			priv->del_list_idx = 0;
		ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
		if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list, i)) {
			priv->del_list_idx++;
			break;
		}
	}

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

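/* This function queues a bridged RX packet back onto the AP TX path. It
 * converts the 802.2/LLC/SNAP frame to Ethernet II where applicable, strips
 * the rxPD, and hands the skb to the WMM TX queues.
 */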
static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
					  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	int hdr_chop;
	struct ethhdr *p_ethhdr;
	struct mwifiex_sta_node *src_node;
	int index;

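	/* rx_pkt_offset locates the 802.3 header relative to the start of
	 * the rxPD.
	 */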
	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	if ((atomic_read(&adapter->pending_bridged_pkts) >=
					     MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "Tx: Bridge packet limit reached. Drop packet!\n");
		kfree_skb(skb);
		mwifiex_uap_cleanup_tx_queues(priv);
		return;
	}

	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
		     sizeof(bridge_tunnel_header))) ||
	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
		     sizeof(rfc1042_header)) &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
		/* Replace the 802.3 header and RFC 1042 (LLC/SNAP) header
		 * with an Ethernet II header, keeping the src/dst addresses
		 * and the snap_type (EtherType).
		 *
		 * The firmware converts all RX data from 802.11 to
		 * 802.2/LLC/SNAP frames, so only SNAP frames reach the
		 * driver.
		 *
		 * To create the Ethernet II header, just move the src and
		 * dst addresses so they sit right before the snap_type.
		 */
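		/* The new header (dest + src + EtherType) must end exactly
		 * where the old 802.3 + LLC/SNAP headers ended, so its start
		 * is found by backing up 6 + 6 + 2 bytes from that end.
		 */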
		p_ethhdr = (struct ethhdr *)
			((u8 *)(&rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->rfc1042_hdr)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
			 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
		memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
		       sizeof(p_ethhdr->h_source));
		memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
		       sizeof(p_ethhdr->h_dest));
		/* Chop off the rxPD plus the excess bytes of the removed
		 * 802.2/LLC/SNAP header.
		 */
		hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
	} else {
		/* Chop off the rxPD only */
		hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
	}

	/* Chop off the leading header bytes so that skb->data points to
	 * the start of either the reconstructed Ethernet II frame or the
	 * 802.2/LLC/SNAP frame.
	 */
	skb_pull(skb, hdr_chop);

	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		mwifiex_dbg(priv->adapter, ERROR,
			    "data: Tx: insufficient skb headroom %d\n",
			    skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			mwifiex_dbg(priv->adapter, ERROR,
				    "Tx: cannot allocate new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return;
		}

		kfree_skb(skb);
		skb = new_skb;
		mwifiex_dbg(priv->adapter, INFO,
			    "info: new skb headroom %d\n",
			    skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;

	src_node = mwifiex_get_sta_entry(priv, rx_pkt_hdr->eth803_hdr.h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
		src_node->stats.last_tx_rate = uap_rx_pd->rx_rate;
		src_node->stats.last_tx_htinfo = uap_rx_pd->ht_info;
	}

	if (is_unicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest)) {
		/* Update bridge packet statistics as the
		 * packet is not going to kernel/upper layer.
		 */
		priv->stats.rx_bytes += skb->len;
		priv->stats.rx_packets++;

		/* Sending bridge packet to TX queue, so save the packet
		 * length in TXCB to update statistics in TX complete.
		 */
		tx_info->pkt_len = skb->len;
	}

	__net_timestamp(skb);

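	/* Map the 802.1d priority to the corresponding WMM queue index. */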
	index = mwifiex_1d_to_wmm_queue[skb->priority];
	atomic_inc(&priv->wmm_tx_pending[index]);
	mwifiex_wmm_add_buf_txqueue(priv, skb);
	atomic_inc(&adapter->tx_pending);
	atomic_inc(&adapter->pending_bridged_pkts);

	mwifiex_queue_main_work(priv->adapter);
}

/*
 * This function contains the logic for AP packet forwarding.
 *
 * If a packet is multicast/broadcast, it is sent to the kernel/upper layer
 * as well as queued back onto the AP TX queue so that it can be sent to
 * the other associated stations.
 * If a packet is unicast and the RA is present in the associated station
 * list, it is requeued onto the AP TX queue.
 * If a packet is unicast and the RA is not in the associated station list,
 * the packet is forwarded to the kernel to handle the routing logic.
 */
int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ra[ETH_ALEN];
	struct sk_buff *skb_uap;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	/* don't do packet forwarding in disconnected state */
	if (!priv->media_connected) {
		mwifiex_dbg(adapter, ERROR,
			    "drop packet in disconnected state.\n");
		dev_kfree_skb_any(skb);
		return 0;
	}

	memcpy(ra, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN);

	if (is_multicast_ether_addr(ra)) {
		/* skb_copy() can fail under memory pressure; queue the
		 * bridged copy only if the allocation succeeded, since
		 * mwifiex_uap_queue_bridged_pkt() dereferences the skb.
		 */
		skb_uap = skb_copy(skb, GFP_ATOMIC);
		if (likely(skb_uap))
			mwifiex_uap_queue_bridged_pkt(priv, skb_uap);
	} else {
		if (mwifiex_get_sta_entry(priv, ra)) {
			/* Requeue Intra-BSS packet */
			mwifiex_uap_queue_bridged_pkt(priv, skb);
			return 0;
		}
	}

	/* Forward unicast/Inter-BSS packets to the kernel. */
	return mwifiex_process_rx_packet(priv, skb);
}
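/* This function processes an Ethernet-framed RX packet on the AP interface:
 * it updates per-station statistics, bridges the packet back onto the AP TX
 * path where needed, and otherwise forwards it to the kernel.
 */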
int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *src_node;
	struct ethhdr *p_ethhdr;
	struct sk_buff *skb_uap;
	struct mwifiex_txinfo *tx_info;

	if (!skb)
		return -1;

	p_ethhdr = (void *)skb->data;
	src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
	if (src_node) {
		src_node->stats.last_rx = jiffies;
		src_node->stats.rx_bytes += skb->len;
		src_node->stats.rx_packets++;
	}

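	/* Bridge multicast frames, and unicast frames destined for an
	 * associated station, back out through the AP interface.
	 */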
	if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
	    mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
		if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
			skb_uap =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		else
			skb_uap = skb_copy(skb, GFP_ATOMIC);

		if (likely(skb_uap)) {
			tx_info = MWIFIEX_SKB_TXCB(skb_uap);
			memset(tx_info, 0, sizeof(*tx_info));
			tx_info->bss_num = priv->bss_num;
			tx_info->bss_type = priv->bss_type;
			tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
			__net_timestamp(skb_uap);
			mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
			atomic_inc(&adapter->tx_pending);
			atomic_inc(&adapter->pending_bridged_pkts);
			if ((atomic_read(&adapter->pending_bridged_pkts) >=
					MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
				mwifiex_dbg(adapter, ERROR,
					    "Tx: Bridge packet limit reached. Drop packet!\n");
				mwifiex_uap_cleanup_tx_queues(priv);
			}
		} else {
			mwifiex_dbg(adapter, ERROR, "failed to allocate skb_uap\n");
		}

		mwifiex_queue_main_work(adapter);
		/* Don't forward Intra-BSS unicast packets to the upper layer */
		if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
			return 0;
	}

	skb->dev = priv->netdev;
	skb->protocol = eth_type_trans(skb, priv->netdev);
	skb->ip_summed = CHECKSUM_NONE;

	/* This is required only for 11n on USB/PCIe, where we allocate a 4K
	 * buffer for every RX packet so that 4K A-MSDUs can be received. On
	 * SDIO, buffers are allocated based on the packet size, so this is
	 * not needed there.
	 *
	 * Adjust truesize because each skb is allocated at 4K while the
	 * received packets are typically only ~2K. Without this, the
	 * kernel's socket accounting overestimates memory usage: if an
	 * application allocates a 64K receive buffer for a 64K packet that
	 * arrives as ~1.5K IP fragments, each fragment held in a 4K
	 * allocation nearly doubles the accounted size, the 64K limit is
	 * hit early, and the kernel drops the remaining fragments. This
	 * previously caused the Filesndl-ht.scr script to fail for UDP,
	 * hence this fix.
	 */
	if ((adapter->iface_type == MWIFIEX_USB ||
	     adapter->iface_type == MWIFIEX_PCIE) &&
	    skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)
		skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);

	/* Forward multicast/broadcast and Inter-BSS unicast packets to the
	 * upper layer.
	 */
	netif_rx_any_context(skb);
	return 0;
}

/*
 * This function processes a packet received on the AP interface.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it's a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. The skb is then passed to the AP packet
 * forwarding logic.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u16 rx_pkt_type;
	u8 ta[ETH_ALEN], pkt_type;
	struct mwifiex_sta_node *node;

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);

	if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
	     le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16)skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "wrong rx packet: len=%d, offset=%d, length=%d\n",
			    skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
			    le16_to_cpu(uap_rx_pd->rx_pkt_length));
		priv->stats.rx_dropped++;

		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->stats.tx_failed++;

		dev_kfree_skb_any(skb);
		return 0;
	}

	if (rx_pkt_type == PKT_TYPE_MGMT) {
		ret = mwifiex_process_mgmt_packet(priv, skb);
		if (ret)
			mwifiex_dbg(adapter, DATA, "Rx of mgmt packet failed\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

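	/* Record the last RX sequence number seen from this station for
	 * the given TID.
	 */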
	if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
		spin_lock_bh(&priv->sta_list_spinlock);
		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			node->rx_seq[uap_rx_pd->priority] =
						le16_to_cpu(uap_rx_pd->seq_num);
		spin_unlock_bh(&priv->sta_list_spinlock);
	}

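	/* With 11n disabled, or when no reorder table exists for this TA
	 * and TID and the frame is not an A-MSDU, bypass RX reordering and
	 * forward the packet directly.
	 */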
	if (!priv->ap_11n_enabled ||
	    (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
	    (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
		ret = mwifiex_handle_uap_rx_forward(priv, skb);
		return ret;
	}

	/* Reorder and send to kernel */
	pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
	ret = mwifiex_11n_rx_reorder_pkt(priv, le16_to_cpu(uap_rx_pd->seq_num),
					 uap_rx_pd->priority, ta, pkt_type,
					 skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
		dev_kfree_skb_any(skb);

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}

/*
 * This function fills the TxPD for AP tx packets.
 *
 * The Tx buffer received by this function should already have the
 * header space allocated for the TxPD.
 *
 * This function inserts the TxPD between the interface header and the
 * actual data and adjusts the buffer pointers accordingly.
 *
 * The following TxPD fields are set by this function, as required -
 *      - BSS number
 *      - Tx packet length and offset
 *      - Priority
 *      - Packet delay
 *      - Priority specific Tx control
 *      - Flags
 */
void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
			       struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_txpd *txpd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
	int pad;
	u16 pkt_type, pkt_offset;
	int hroom = adapter->intf_hdr_len;

	if (!skb->len) {
		mwifiex_dbg(adapter, ERROR,
			    "Tx: bad packet length: %d\n", skb->len);
		tx_info->status_code = -1;
		return skb->data;
	}

	BUG_ON(skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN);

	pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0;

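	/* Subtracting NULL converts the pointer to an integer so its low
	 * bits can be masked: pad is the amount needed so that the buffer
	 * start, after the txPD and interface header are pushed, is
	 * MWIFIEX_DMA_ALIGN_SZ aligned.
	 */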
	pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) &
			(MWIFIEX_DMA_ALIGN_SZ - 1);

	skb_push(skb, sizeof(*txpd) + pad);

	txpd = (struct uap_txpd *)skb->data;
	memset(txpd, 0, sizeof(*txpd));
	txpd->bss_num = priv->bss_num;
	txpd->bss_type = priv->bss_type;
	txpd->tx_pkt_length = cpu_to_le16((u16)(skb->len - (sizeof(*txpd) +
						pad)));
	txpd->priority = (u8)skb->priority;

	txpd->pkt_delay_2ms = mwifiex_wmm_compute_drv_pkt_delay(priv, skb);

	if (tx_info->flags & MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS ||
	    tx_info->flags & MWIFIEX_BUF_FLAG_ACTION_TX_STATUS) {
		txpd->tx_token_id = tx_info->ack_frame_id;
		txpd->flags |= MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS;
	}

	if (txpd->priority < ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
		/*
		 * Set the priority-specific tx_control field; a value of 0
		 * causes the default to be applied later in this function.
		 */
		txpd->tx_control =
		    cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[txpd->priority]);

	/* Offset of actual data */
	pkt_offset = sizeof(*txpd) + pad;
	if (pkt_type == PKT_TYPE_MGMT) {
		/* Set the packet type and add header for management frame */
		txpd->tx_pkt_type = cpu_to_le16(pkt_type);
		pkt_offset += MWIFIEX_MGMT_FRAME_HEADER_SIZE;
	}

	txpd->tx_pkt_offset = cpu_to_le16(pkt_offset);

	/* make space for adapter->intf_hdr_len */
	skb_push(skb, hroom);

	if (!txpd->tx_control)
		/* TxCtrl set by user or default */
		txpd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	return skb->data;
}