xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/ath/wil6210/txrx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: ISC
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
4*4882a593Smuzhiyun  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/etherdevice.h>
8*4882a593Smuzhiyun #include <net/ieee80211_radiotap.h>
9*4882a593Smuzhiyun #include <linux/if_arp.h>
10*4882a593Smuzhiyun #include <linux/moduleparam.h>
11*4882a593Smuzhiyun #include <linux/ip.h>
12*4882a593Smuzhiyun #include <linux/ipv6.h>
13*4882a593Smuzhiyun #include <linux/if_vlan.h>
14*4882a593Smuzhiyun #include <net/ipv6.h>
15*4882a593Smuzhiyun #include <linux/prefetch.h>
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include "wil6210.h"
18*4882a593Smuzhiyun #include "wmi.h"
19*4882a593Smuzhiyun #include "txrx.h"
20*4882a593Smuzhiyun #include "trace.h"
21*4882a593Smuzhiyun #include "txrx_edma.h"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun bool rx_align_2;
24*4882a593Smuzhiyun module_param(rx_align_2, bool, 0444);
25*4882a593Smuzhiyun MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun bool rx_large_buf;
28*4882a593Smuzhiyun module_param(rx_large_buf, bool, 0444);
29*4882a593Smuzhiyun MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no");
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun /* Drop Tx packets in case Tx ring is full */
32*4882a593Smuzhiyun bool drop_if_ring_full;
33*4882a593Smuzhiyun 
wil_rx_snaplen(void)34*4882a593Smuzhiyun static inline uint wil_rx_snaplen(void)
35*4882a593Smuzhiyun {
36*4882a593Smuzhiyun 	return rx_align_2 ? 6 : 0;
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun /* wil_ring_wmark_low - low watermark for available descriptor space */
wil_ring_wmark_low(struct wil_ring * ring)40*4882a593Smuzhiyun static inline int wil_ring_wmark_low(struct wil_ring *ring)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun 	return ring->size / 8;
43*4882a593Smuzhiyun }
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun /* wil_ring_wmark_high - high watermark for available descriptor space */
wil_ring_wmark_high(struct wil_ring * ring)46*4882a593Smuzhiyun static inline int wil_ring_wmark_high(struct wil_ring *ring)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun 	return ring->size / 4;
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun /* returns true if num avail descriptors is lower than wmark_low */
wil_ring_avail_low(struct wil_ring * ring)52*4882a593Smuzhiyun static inline int wil_ring_avail_low(struct wil_ring *ring)
53*4882a593Smuzhiyun {
54*4882a593Smuzhiyun 	return wil_ring_avail_tx(ring) < wil_ring_wmark_low(ring);
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun /* returns true if num avail descriptors is higher than wmark_high */
wil_ring_avail_high(struct wil_ring * ring)58*4882a593Smuzhiyun static inline int wil_ring_avail_high(struct wil_ring *ring)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	return wil_ring_avail_tx(ring) > wil_ring_wmark_high(ring);
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun /* returns true when all tx vrings are empty */
wil_is_tx_idle(struct wil6210_priv * wil)64*4882a593Smuzhiyun bool wil_is_tx_idle(struct wil6210_priv *wil)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun 	int i;
67*4882a593Smuzhiyun 	unsigned long data_comp_to;
68*4882a593Smuzhiyun 	int min_ring_id = wil_get_min_tx_ring_id(wil);
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
71*4882a593Smuzhiyun 		struct wil_ring *vring = &wil->ring_tx[i];
72*4882a593Smuzhiyun 		int vring_index = vring - wil->ring_tx;
73*4882a593Smuzhiyun 		struct wil_ring_tx_data *txdata =
74*4882a593Smuzhiyun 			&wil->ring_tx_data[vring_index];
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 		spin_lock(&txdata->lock);
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun 		if (!vring->va || !txdata->enabled) {
79*4882a593Smuzhiyun 			spin_unlock(&txdata->lock);
80*4882a593Smuzhiyun 			continue;
81*4882a593Smuzhiyun 		}
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 		data_comp_to = jiffies + msecs_to_jiffies(
84*4882a593Smuzhiyun 					WIL_DATA_COMPLETION_TO_MS);
85*4882a593Smuzhiyun 		if (test_bit(wil_status_napi_en, wil->status)) {
86*4882a593Smuzhiyun 			while (!wil_ring_is_empty(vring)) {
87*4882a593Smuzhiyun 				if (time_after(jiffies, data_comp_to)) {
88*4882a593Smuzhiyun 					wil_dbg_pm(wil,
89*4882a593Smuzhiyun 						   "TO waiting for idle tx\n");
90*4882a593Smuzhiyun 					spin_unlock(&txdata->lock);
91*4882a593Smuzhiyun 					return false;
92*4882a593Smuzhiyun 				}
93*4882a593Smuzhiyun 				wil_dbg_ratelimited(wil,
94*4882a593Smuzhiyun 						    "tx vring is not empty -> NAPI\n");
95*4882a593Smuzhiyun 				spin_unlock(&txdata->lock);
96*4882a593Smuzhiyun 				napi_synchronize(&wil->napi_tx);
97*4882a593Smuzhiyun 				msleep(20);
98*4882a593Smuzhiyun 				spin_lock(&txdata->lock);
99*4882a593Smuzhiyun 				if (!vring->va || !txdata->enabled)
100*4882a593Smuzhiyun 					break;
101*4882a593Smuzhiyun 			}
102*4882a593Smuzhiyun 		}
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun 		spin_unlock(&txdata->lock);
105*4882a593Smuzhiyun 	}
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	return true;
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun 
wil_vring_alloc(struct wil6210_priv * wil,struct wil_ring * vring)110*4882a593Smuzhiyun static int wil_vring_alloc(struct wil6210_priv *wil, struct wil_ring *vring)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun 	struct device *dev = wil_to_dev(wil);
113*4882a593Smuzhiyun 	size_t sz = vring->size * sizeof(vring->va[0]);
114*4882a593Smuzhiyun 	uint i;
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun 	wil_dbg_misc(wil, "vring_alloc:\n");
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	vring->swhead = 0;
121*4882a593Smuzhiyun 	vring->swtail = 0;
122*4882a593Smuzhiyun 	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
123*4882a593Smuzhiyun 	if (!vring->ctx) {
124*4882a593Smuzhiyun 		vring->va = NULL;
125*4882a593Smuzhiyun 		return -ENOMEM;
126*4882a593Smuzhiyun 	}
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	/* vring->va should be aligned on its size rounded up to power of 2
129*4882a593Smuzhiyun 	 * This is granted by the dma_alloc_coherent.
130*4882a593Smuzhiyun 	 *
131*4882a593Smuzhiyun 	 * HW has limitation that all vrings addresses must share the same
132*4882a593Smuzhiyun 	 * upper 16 msb bits part of 48 bits address. To workaround that,
133*4882a593Smuzhiyun 	 * if we are using more than 32 bit addresses switch to 32 bit
134*4882a593Smuzhiyun 	 * allocation before allocating vring memory.
135*4882a593Smuzhiyun 	 *
136*4882a593Smuzhiyun 	 * There's no check for the return value of dma_set_mask_and_coherent,
137*4882a593Smuzhiyun 	 * since we assume if we were able to set the mask during
138*4882a593Smuzhiyun 	 * initialization in this system it will not fail if we set it again
139*4882a593Smuzhiyun 	 */
140*4882a593Smuzhiyun 	if (wil->dma_addr_size > 32)
141*4882a593Smuzhiyun 		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
144*4882a593Smuzhiyun 	if (!vring->va) {
145*4882a593Smuzhiyun 		kfree(vring->ctx);
146*4882a593Smuzhiyun 		vring->ctx = NULL;
147*4882a593Smuzhiyun 		return -ENOMEM;
148*4882a593Smuzhiyun 	}
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	if (wil->dma_addr_size > 32)
151*4882a593Smuzhiyun 		dma_set_mask_and_coherent(dev,
152*4882a593Smuzhiyun 					  DMA_BIT_MASK(wil->dma_addr_size));
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	/* initially, all descriptors are SW owned
155*4882a593Smuzhiyun 	 * For Tx and Rx, ownership bit is at the same location, thus
156*4882a593Smuzhiyun 	 * we can use any
157*4882a593Smuzhiyun 	 */
158*4882a593Smuzhiyun 	for (i = 0; i < vring->size; i++) {
159*4882a593Smuzhiyun 		volatile struct vring_tx_desc *_d =
160*4882a593Smuzhiyun 			&vring->va[i].tx.legacy;
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 		_d->dma.status = TX_DMA_STATUS_DU;
163*4882a593Smuzhiyun 	}
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	wil_dbg_misc(wil, "vring[%d] 0x%p:%pad 0x%p\n", vring->size,
166*4882a593Smuzhiyun 		     vring->va, &vring->pa, vring->ctx);
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	return 0;
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun 
wil_txdesc_unmap(struct device * dev,union wil_tx_desc * desc,struct wil_ctx * ctx)171*4882a593Smuzhiyun static void wil_txdesc_unmap(struct device *dev, union wil_tx_desc *desc,
172*4882a593Smuzhiyun 			     struct wil_ctx *ctx)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun 	struct vring_tx_desc *d = &desc->legacy;
175*4882a593Smuzhiyun 	dma_addr_t pa = wil_desc_addr(&d->dma.addr);
176*4882a593Smuzhiyun 	u16 dmalen = le16_to_cpu(d->dma.length);
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	switch (ctx->mapped_as) {
179*4882a593Smuzhiyun 	case wil_mapped_as_single:
180*4882a593Smuzhiyun 		dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
181*4882a593Smuzhiyun 		break;
182*4882a593Smuzhiyun 	case wil_mapped_as_page:
183*4882a593Smuzhiyun 		dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
184*4882a593Smuzhiyun 		break;
185*4882a593Smuzhiyun 	default:
186*4882a593Smuzhiyun 		break;
187*4882a593Smuzhiyun 	}
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun 
wil_vring_free(struct wil6210_priv * wil,struct wil_ring * vring)190*4882a593Smuzhiyun static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun 	struct device *dev = wil_to_dev(wil);
193*4882a593Smuzhiyun 	size_t sz = vring->size * sizeof(vring->va[0]);
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	lockdep_assert_held(&wil->mutex);
196*4882a593Smuzhiyun 	if (!vring->is_rx) {
197*4882a593Smuzhiyun 		int vring_index = vring - wil->ring_tx;
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 		wil_dbg_misc(wil, "free Tx vring %d [%d] 0x%p:%pad 0x%p\n",
200*4882a593Smuzhiyun 			     vring_index, vring->size, vring->va,
201*4882a593Smuzhiyun 			     &vring->pa, vring->ctx);
202*4882a593Smuzhiyun 	} else {
203*4882a593Smuzhiyun 		wil_dbg_misc(wil, "free Rx vring [%d] 0x%p:%pad 0x%p\n",
204*4882a593Smuzhiyun 			     vring->size, vring->va,
205*4882a593Smuzhiyun 			     &vring->pa, vring->ctx);
206*4882a593Smuzhiyun 	}
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	while (!wil_ring_is_empty(vring)) {
209*4882a593Smuzhiyun 		dma_addr_t pa;
210*4882a593Smuzhiyun 		u16 dmalen;
211*4882a593Smuzhiyun 		struct wil_ctx *ctx;
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 		if (!vring->is_rx) {
214*4882a593Smuzhiyun 			struct vring_tx_desc dd, *d = &dd;
215*4882a593Smuzhiyun 			volatile struct vring_tx_desc *_d =
216*4882a593Smuzhiyun 					&vring->va[vring->swtail].tx.legacy;
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 			ctx = &vring->ctx[vring->swtail];
219*4882a593Smuzhiyun 			if (!ctx) {
220*4882a593Smuzhiyun 				wil_dbg_txrx(wil,
221*4882a593Smuzhiyun 					     "ctx(%d) was already completed\n",
222*4882a593Smuzhiyun 					     vring->swtail);
223*4882a593Smuzhiyun 				vring->swtail = wil_ring_next_tail(vring);
224*4882a593Smuzhiyun 				continue;
225*4882a593Smuzhiyun 			}
226*4882a593Smuzhiyun 			*d = *_d;
227*4882a593Smuzhiyun 			wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
228*4882a593Smuzhiyun 			if (ctx->skb)
229*4882a593Smuzhiyun 				dev_kfree_skb_any(ctx->skb);
230*4882a593Smuzhiyun 			vring->swtail = wil_ring_next_tail(vring);
231*4882a593Smuzhiyun 		} else { /* rx */
232*4882a593Smuzhiyun 			struct vring_rx_desc dd, *d = &dd;
233*4882a593Smuzhiyun 			volatile struct vring_rx_desc *_d =
234*4882a593Smuzhiyun 				&vring->va[vring->swhead].rx.legacy;
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 			ctx = &vring->ctx[vring->swhead];
237*4882a593Smuzhiyun 			*d = *_d;
238*4882a593Smuzhiyun 			pa = wil_desc_addr(&d->dma.addr);
239*4882a593Smuzhiyun 			dmalen = le16_to_cpu(d->dma.length);
240*4882a593Smuzhiyun 			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
241*4882a593Smuzhiyun 			kfree_skb(ctx->skb);
242*4882a593Smuzhiyun 			wil_ring_advance_head(vring, 1);
243*4882a593Smuzhiyun 		}
244*4882a593Smuzhiyun 	}
245*4882a593Smuzhiyun 	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
246*4882a593Smuzhiyun 	kfree(vring->ctx);
247*4882a593Smuzhiyun 	vring->pa = 0;
248*4882a593Smuzhiyun 	vring->va = NULL;
249*4882a593Smuzhiyun 	vring->ctx = NULL;
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun /* Allocate one skb for Rx VRING
253*4882a593Smuzhiyun  *
254*4882a593Smuzhiyun  * Safe to call from IRQ
255*4882a593Smuzhiyun  */
wil_vring_alloc_skb(struct wil6210_priv * wil,struct wil_ring * vring,u32 i,int headroom)256*4882a593Smuzhiyun static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
257*4882a593Smuzhiyun 			       u32 i, int headroom)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun 	struct device *dev = wil_to_dev(wil);
260*4882a593Smuzhiyun 	unsigned int sz = wil->rx_buf_len + ETH_HLEN + wil_rx_snaplen();
261*4882a593Smuzhiyun 	struct vring_rx_desc dd, *d = &dd;
262*4882a593Smuzhiyun 	volatile struct vring_rx_desc *_d = &vring->va[i].rx.legacy;
263*4882a593Smuzhiyun 	dma_addr_t pa;
264*4882a593Smuzhiyun 	struct sk_buff *skb = dev_alloc_skb(sz + headroom);
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	if (unlikely(!skb))
267*4882a593Smuzhiyun 		return -ENOMEM;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	skb_reserve(skb, headroom);
270*4882a593Smuzhiyun 	skb_put(skb, sz);
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	/**
273*4882a593Smuzhiyun 	 * Make sure that the network stack calculates checksum for packets
274*4882a593Smuzhiyun 	 * which failed the HW checksum calculation
275*4882a593Smuzhiyun 	 */
276*4882a593Smuzhiyun 	skb->ip_summed = CHECKSUM_NONE;
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
279*4882a593Smuzhiyun 	if (unlikely(dma_mapping_error(dev, pa))) {
280*4882a593Smuzhiyun 		kfree_skb(skb);
281*4882a593Smuzhiyun 		return -ENOMEM;
282*4882a593Smuzhiyun 	}
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
285*4882a593Smuzhiyun 	wil_desc_addr_set(&d->dma.addr, pa);
286*4882a593Smuzhiyun 	/* ip_length don't care */
287*4882a593Smuzhiyun 	/* b11 don't care */
288*4882a593Smuzhiyun 	/* error don't care */
289*4882a593Smuzhiyun 	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
290*4882a593Smuzhiyun 	d->dma.length = cpu_to_le16(sz);
291*4882a593Smuzhiyun 	*_d = *d;
292*4882a593Smuzhiyun 	vring->ctx[i].skb = skb;
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 	return 0;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun /* Adds radiotap header
298*4882a593Smuzhiyun  *
299*4882a593Smuzhiyun  * Any error indicated as "Bad FCS"
300*4882a593Smuzhiyun  *
301*4882a593Smuzhiyun  * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
302*4882a593Smuzhiyun  *  - Rx descriptor: 32 bytes
303*4882a593Smuzhiyun  *  - Phy info
304*4882a593Smuzhiyun  */
wil_rx_add_radiotap_header(struct wil6210_priv * wil,struct sk_buff * skb)305*4882a593Smuzhiyun static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
306*4882a593Smuzhiyun 				       struct sk_buff *skb)
307*4882a593Smuzhiyun {
308*4882a593Smuzhiyun 	struct wil6210_rtap {
309*4882a593Smuzhiyun 		struct ieee80211_radiotap_header rthdr;
310*4882a593Smuzhiyun 		/* fields should be in the order of bits in rthdr.it_present */
311*4882a593Smuzhiyun 		/* flags */
312*4882a593Smuzhiyun 		u8 flags;
313*4882a593Smuzhiyun 		/* channel */
314*4882a593Smuzhiyun 		__le16 chnl_freq __aligned(2);
315*4882a593Smuzhiyun 		__le16 chnl_flags;
316*4882a593Smuzhiyun 		/* MCS */
317*4882a593Smuzhiyun 		u8 mcs_present;
318*4882a593Smuzhiyun 		u8 mcs_flags;
319*4882a593Smuzhiyun 		u8 mcs_index;
320*4882a593Smuzhiyun 	} __packed;
321*4882a593Smuzhiyun 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
322*4882a593Smuzhiyun 	struct wil6210_rtap *rtap;
323*4882a593Smuzhiyun 	int rtap_len = sizeof(struct wil6210_rtap);
324*4882a593Smuzhiyun 	struct ieee80211_channel *ch = wil->monitor_chandef.chan;
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	if (skb_headroom(skb) < rtap_len &&
327*4882a593Smuzhiyun 	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
328*4882a593Smuzhiyun 		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
329*4882a593Smuzhiyun 		return;
330*4882a593Smuzhiyun 	}
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	rtap = skb_push(skb, rtap_len);
333*4882a593Smuzhiyun 	memset(rtap, 0, rtap_len);
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
336*4882a593Smuzhiyun 	rtap->rthdr.it_len = cpu_to_le16(rtap_len);
337*4882a593Smuzhiyun 	rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
338*4882a593Smuzhiyun 			(1 << IEEE80211_RADIOTAP_CHANNEL) |
339*4882a593Smuzhiyun 			(1 << IEEE80211_RADIOTAP_MCS));
340*4882a593Smuzhiyun 	if (d->dma.status & RX_DMA_STATUS_ERROR)
341*4882a593Smuzhiyun 		rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	rtap->chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
344*4882a593Smuzhiyun 	rtap->chnl_flags = cpu_to_le16(0);
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
347*4882a593Smuzhiyun 	rtap->mcs_flags = 0;
348*4882a593Smuzhiyun 	rtap->mcs_index = wil_rxdesc_mcs(d);
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun 
wil_is_rx_idle(struct wil6210_priv * wil)351*4882a593Smuzhiyun static bool wil_is_rx_idle(struct wil6210_priv *wil)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun 	struct vring_rx_desc *_d;
354*4882a593Smuzhiyun 	struct wil_ring *ring = &wil->ring_rx;
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 	_d = (struct vring_rx_desc *)&ring->va[ring->swhead].rx.legacy;
357*4882a593Smuzhiyun 	if (_d->dma.status & RX_DMA_STATUS_DU)
358*4882a593Smuzhiyun 		return false;
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	return true;
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun 
wil_rx_get_cid_by_skb(struct wil6210_priv * wil,struct sk_buff * skb)363*4882a593Smuzhiyun static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
364*4882a593Smuzhiyun {
365*4882a593Smuzhiyun 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
366*4882a593Smuzhiyun 	int mid = wil_rxdesc_mid(d);
367*4882a593Smuzhiyun 	struct wil6210_vif *vif = wil->vifs[mid];
368*4882a593Smuzhiyun 	/* cid from DMA descriptor is limited to 3 bits.
369*4882a593Smuzhiyun 	 * In case of cid>=8, the value would be cid modulo 8 and we need to
370*4882a593Smuzhiyun 	 * find real cid by locating the transmitter (ta) inside sta array
371*4882a593Smuzhiyun 	 */
372*4882a593Smuzhiyun 	int cid = wil_rxdesc_cid(d);
373*4882a593Smuzhiyun 	unsigned int snaplen = wil_rx_snaplen();
374*4882a593Smuzhiyun 	struct ieee80211_hdr_3addr *hdr;
375*4882a593Smuzhiyun 	int i;
376*4882a593Smuzhiyun 	unsigned char *ta;
377*4882a593Smuzhiyun 	u8 ftype;
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	/* in monitor mode there are no connections */
380*4882a593Smuzhiyun 	if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
381*4882a593Smuzhiyun 		return cid;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	ftype = wil_rxdesc_ftype(d) << 2;
384*4882a593Smuzhiyun 	if (likely(ftype == IEEE80211_FTYPE_DATA)) {
385*4882a593Smuzhiyun 		if (unlikely(skb->len < ETH_HLEN + snaplen)) {
386*4882a593Smuzhiyun 			wil_err_ratelimited(wil,
387*4882a593Smuzhiyun 					    "Short data frame, len = %d\n",
388*4882a593Smuzhiyun 					    skb->len);
389*4882a593Smuzhiyun 			return -ENOENT;
390*4882a593Smuzhiyun 		}
391*4882a593Smuzhiyun 		ta = wil_skb_get_sa(skb);
392*4882a593Smuzhiyun 	} else {
393*4882a593Smuzhiyun 		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
394*4882a593Smuzhiyun 			wil_err_ratelimited(wil, "Short frame, len = %d\n",
395*4882a593Smuzhiyun 					    skb->len);
396*4882a593Smuzhiyun 			return -ENOENT;
397*4882a593Smuzhiyun 		}
398*4882a593Smuzhiyun 		hdr = (void *)skb->data;
399*4882a593Smuzhiyun 		ta = hdr->addr2;
400*4882a593Smuzhiyun 	}
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	if (wil->max_assoc_sta <= WIL6210_RX_DESC_MAX_CID)
403*4882a593Smuzhiyun 		return cid;
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun 	/* assuming no concurrency between AP interfaces and STA interfaces.
406*4882a593Smuzhiyun 	 * multista is used only in P2P_GO or AP mode. In other modes return
407*4882a593Smuzhiyun 	 * cid from the rx descriptor
408*4882a593Smuzhiyun 	 */
409*4882a593Smuzhiyun 	if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO &&
410*4882a593Smuzhiyun 	    vif->wdev.iftype != NL80211_IFTYPE_AP)
411*4882a593Smuzhiyun 		return cid;
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	/* For Rx packets cid from rx descriptor is limited to 3 bits (0..7),
414*4882a593Smuzhiyun 	 * to find the real cid, compare transmitter address with the stored
415*4882a593Smuzhiyun 	 * stations mac address in the driver sta array
416*4882a593Smuzhiyun 	 */
417*4882a593Smuzhiyun 	for (i = cid; i < wil->max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) {
418*4882a593Smuzhiyun 		if (wil->sta[i].status != wil_sta_unused &&
419*4882a593Smuzhiyun 		    ether_addr_equal(wil->sta[i].addr, ta)) {
420*4882a593Smuzhiyun 			cid = i;
421*4882a593Smuzhiyun 			break;
422*4882a593Smuzhiyun 		}
423*4882a593Smuzhiyun 	}
424*4882a593Smuzhiyun 	if (i >= wil->max_assoc_sta) {
425*4882a593Smuzhiyun 		wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n",
426*4882a593Smuzhiyun 				    ta, vif->wdev.iftype, ftype, skb->len);
427*4882a593Smuzhiyun 		cid = -ENOENT;
428*4882a593Smuzhiyun 	}
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	return cid;
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun /* reap 1 frame from @swhead
434*4882a593Smuzhiyun  *
435*4882a593Smuzhiyun  * Rx descriptor copied to skb->cb
436*4882a593Smuzhiyun  *
437*4882a593Smuzhiyun  * Safe to call from IRQ
438*4882a593Smuzhiyun  */
wil_vring_reap_rx(struct wil6210_priv * wil,struct wil_ring * vring)439*4882a593Smuzhiyun static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
440*4882a593Smuzhiyun 					 struct wil_ring *vring)
441*4882a593Smuzhiyun {
442*4882a593Smuzhiyun 	struct device *dev = wil_to_dev(wil);
443*4882a593Smuzhiyun 	struct wil6210_vif *vif;
444*4882a593Smuzhiyun 	struct net_device *ndev;
445*4882a593Smuzhiyun 	volatile struct vring_rx_desc *_d;
446*4882a593Smuzhiyun 	struct vring_rx_desc *d;
447*4882a593Smuzhiyun 	struct sk_buff *skb;
448*4882a593Smuzhiyun 	dma_addr_t pa;
449*4882a593Smuzhiyun 	unsigned int snaplen = wil_rx_snaplen();
450*4882a593Smuzhiyun 	unsigned int sz = wil->rx_buf_len + ETH_HLEN + snaplen;
451*4882a593Smuzhiyun 	u16 dmalen;
452*4882a593Smuzhiyun 	u8 ftype;
453*4882a593Smuzhiyun 	int cid, mid;
454*4882a593Smuzhiyun 	int i;
455*4882a593Smuzhiyun 	struct wil_net_stats *stats;
456*4882a593Smuzhiyun 
457*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb));
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun again:
460*4882a593Smuzhiyun 	if (unlikely(wil_ring_is_empty(vring)))
461*4882a593Smuzhiyun 		return NULL;
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	i = (int)vring->swhead;
464*4882a593Smuzhiyun 	_d = &vring->va[i].rx.legacy;
465*4882a593Smuzhiyun 	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
466*4882a593Smuzhiyun 		/* it is not error, we just reached end of Rx done area */
467*4882a593Smuzhiyun 		return NULL;
468*4882a593Smuzhiyun 	}
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 	skb = vring->ctx[i].skb;
471*4882a593Smuzhiyun 	vring->ctx[i].skb = NULL;
472*4882a593Smuzhiyun 	wil_ring_advance_head(vring, 1);
473*4882a593Smuzhiyun 	if (!skb) {
474*4882a593Smuzhiyun 		wil_err(wil, "No Rx skb at [%d]\n", i);
475*4882a593Smuzhiyun 		goto again;
476*4882a593Smuzhiyun 	}
477*4882a593Smuzhiyun 	d = wil_skb_rxdesc(skb);
478*4882a593Smuzhiyun 	*d = *_d;
479*4882a593Smuzhiyun 	pa = wil_desc_addr(&d->dma.addr);
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
482*4882a593Smuzhiyun 	dmalen = le16_to_cpu(d->dma.length);
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun 	trace_wil6210_rx(i, d);
485*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
486*4882a593Smuzhiyun 	wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
487*4882a593Smuzhiyun 			  (const void *)d, sizeof(*d), false);
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	mid = wil_rxdesc_mid(d);
490*4882a593Smuzhiyun 	vif = wil->vifs[mid];
491*4882a593Smuzhiyun 
492*4882a593Smuzhiyun 	if (unlikely(!vif)) {
493*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "skipped RX descriptor with invalid mid %d",
494*4882a593Smuzhiyun 			     mid);
495*4882a593Smuzhiyun 		kfree_skb(skb);
496*4882a593Smuzhiyun 		goto again;
497*4882a593Smuzhiyun 	}
498*4882a593Smuzhiyun 	ndev = vif_to_ndev(vif);
499*4882a593Smuzhiyun 	if (unlikely(dmalen > sz)) {
500*4882a593Smuzhiyun 		wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n",
501*4882a593Smuzhiyun 				    dmalen);
502*4882a593Smuzhiyun 		kfree_skb(skb);
503*4882a593Smuzhiyun 		goto again;
504*4882a593Smuzhiyun 	}
505*4882a593Smuzhiyun 	skb_trim(skb, dmalen);
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun 	prefetch(skb->data);
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun 	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
510*4882a593Smuzhiyun 			  skb->data, skb_headlen(skb), false);
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 	cid = wil_rx_get_cid_by_skb(wil, skb);
513*4882a593Smuzhiyun 	if (cid == -ENOENT) {
514*4882a593Smuzhiyun 		kfree_skb(skb);
515*4882a593Smuzhiyun 		goto again;
516*4882a593Smuzhiyun 	}
517*4882a593Smuzhiyun 	wil_skb_set_cid(skb, (u8)cid);
518*4882a593Smuzhiyun 	stats = &wil->sta[cid].stats;
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 	stats->last_mcs_rx = wil_rxdesc_mcs(d);
521*4882a593Smuzhiyun 	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
522*4882a593Smuzhiyun 		stats->rx_per_mcs[stats->last_mcs_rx]++;
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	/* use radiotap header only if required */
525*4882a593Smuzhiyun 	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
526*4882a593Smuzhiyun 		wil_rx_add_radiotap_header(wil, skb);
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	/* no extra checks if in sniffer mode */
529*4882a593Smuzhiyun 	if (ndev->type != ARPHRD_ETHER)
530*4882a593Smuzhiyun 		return skb;
531*4882a593Smuzhiyun 	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
532*4882a593Smuzhiyun 	 * Driver should recognize it by frame type, that is found
533*4882a593Smuzhiyun 	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
534*4882a593Smuzhiyun 	 */
535*4882a593Smuzhiyun 	ftype = wil_rxdesc_ftype(d) << 2;
536*4882a593Smuzhiyun 	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
537*4882a593Smuzhiyun 		u8 fc1 = wil_rxdesc_fc1(d);
538*4882a593Smuzhiyun 		int tid = wil_rxdesc_tid(d);
539*4882a593Smuzhiyun 		u16 seq = wil_rxdesc_seq(d);
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 		wil_dbg_txrx(wil,
542*4882a593Smuzhiyun 			     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
543*4882a593Smuzhiyun 			     fc1, mid, cid, tid, seq);
544*4882a593Smuzhiyun 		stats->rx_non_data_frame++;
545*4882a593Smuzhiyun 		if (wil_is_back_req(fc1)) {
546*4882a593Smuzhiyun 			wil_dbg_txrx(wil,
547*4882a593Smuzhiyun 				     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
548*4882a593Smuzhiyun 				     mid, cid, tid, seq);
549*4882a593Smuzhiyun 			wil_rx_bar(wil, vif, cid, tid, seq);
550*4882a593Smuzhiyun 		} else {
551*4882a593Smuzhiyun 			/* print again all info. One can enable only this
552*4882a593Smuzhiyun 			 * without overhead for printing every Rx frame
553*4882a593Smuzhiyun 			 */
554*4882a593Smuzhiyun 			wil_dbg_txrx(wil,
555*4882a593Smuzhiyun 				     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
556*4882a593Smuzhiyun 				     fc1, mid, cid, tid, seq);
557*4882a593Smuzhiyun 			wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
558*4882a593Smuzhiyun 					  (const void *)d, sizeof(*d), false);
559*4882a593Smuzhiyun 			wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
560*4882a593Smuzhiyun 					  skb->data, skb_headlen(skb), false);
561*4882a593Smuzhiyun 		}
562*4882a593Smuzhiyun 		kfree_skb(skb);
563*4882a593Smuzhiyun 		goto again;
564*4882a593Smuzhiyun 	}
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun 	/* L4 IDENT is on when HW calculated checksum, check status
567*4882a593Smuzhiyun 	 * and in case of error drop the packet
568*4882a593Smuzhiyun 	 * higher stack layers will handle retransmission (if required)
569*4882a593Smuzhiyun 	 */
570*4882a593Smuzhiyun 	if (likely(d->dma.status & RX_DMA_STATUS_L4I)) {
571*4882a593Smuzhiyun 		/* L4 protocol identified, csum calculated */
572*4882a593Smuzhiyun 		if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0))
573*4882a593Smuzhiyun 			skb->ip_summed = CHECKSUM_UNNECESSARY;
574*4882a593Smuzhiyun 		/* If HW reports bad checksum, let IP stack re-check it
575*4882a593Smuzhiyun 		 * For example, HW don't understand Microsoft IP stack that
576*4882a593Smuzhiyun 		 * mis-calculates TCP checksum - if it should be 0x0,
577*4882a593Smuzhiyun 		 * it writes 0xffff in violation of RFC 1624
578*4882a593Smuzhiyun 		 */
579*4882a593Smuzhiyun 		else
580*4882a593Smuzhiyun 			stats->rx_csum_err++;
581*4882a593Smuzhiyun 	}
582*4882a593Smuzhiyun 
583*4882a593Smuzhiyun 	if (snaplen) {
584*4882a593Smuzhiyun 		/* Packet layout
585*4882a593Smuzhiyun 		 * +-------+-------+---------+------------+------+
586*4882a593Smuzhiyun 		 * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA |
587*4882a593Smuzhiyun 		 * +-------+-------+---------+------------+------+
588*4882a593Smuzhiyun 		 * Need to remove SNAP, shifting SA and DA forward
589*4882a593Smuzhiyun 		 */
590*4882a593Smuzhiyun 		memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN);
591*4882a593Smuzhiyun 		skb_pull(skb, snaplen);
592*4882a593Smuzhiyun 	}
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 	return skb;
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun /* allocate and fill up to @count buffers in rx ring
598*4882a593Smuzhiyun  * buffers posted at @swtail
599*4882a593Smuzhiyun  * Note: we have a single RX queue for servicing all VIFs, but we
600*4882a593Smuzhiyun  * allocate skbs with headroom according to main interface only. This
601*4882a593Smuzhiyun  * means it will not work with monitor interface together with other VIFs.
602*4882a593Smuzhiyun  * Currently we only support monitor interface on its own without other VIFs,
603*4882a593Smuzhiyun  * and we will need to fix this code once we add support.
604*4882a593Smuzhiyun  */
wil_rx_refill(struct wil6210_priv * wil,int count)605*4882a593Smuzhiyun static int wil_rx_refill(struct wil6210_priv *wil, int count)
606*4882a593Smuzhiyun {
607*4882a593Smuzhiyun 	struct net_device *ndev = wil->main_ndev;
608*4882a593Smuzhiyun 	struct wil_ring *v = &wil->ring_rx;
609*4882a593Smuzhiyun 	u32 next_tail;
610*4882a593Smuzhiyun 	int rc = 0;
611*4882a593Smuzhiyun 	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
612*4882a593Smuzhiyun 			WIL6210_RTAP_SIZE : 0;
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun 	for (; next_tail = wil_ring_next_tail(v),
615*4882a593Smuzhiyun 	     (next_tail != v->swhead) && (count-- > 0);
616*4882a593Smuzhiyun 	     v->swtail = next_tail) {
617*4882a593Smuzhiyun 		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
618*4882a593Smuzhiyun 		if (unlikely(rc)) {
619*4882a593Smuzhiyun 			wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n",
620*4882a593Smuzhiyun 					    rc, v->swtail);
621*4882a593Smuzhiyun 			break;
622*4882a593Smuzhiyun 		}
623*4882a593Smuzhiyun 	}
624*4882a593Smuzhiyun 
625*4882a593Smuzhiyun 	/* make sure all writes to descriptors (shared memory) are done before
626*4882a593Smuzhiyun 	 * committing them to HW
627*4882a593Smuzhiyun 	 */
628*4882a593Smuzhiyun 	wmb();
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun 	wil_w(wil, v->hwtail, v->swtail);
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun 	return rc;
633*4882a593Smuzhiyun }
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun /**
636*4882a593Smuzhiyun  * reverse_memcmp - Compare two areas of memory, in reverse order
637*4882a593Smuzhiyun  * @cs: One area of memory
638*4882a593Smuzhiyun  * @ct: Another area of memory
639*4882a593Smuzhiyun  * @count: The size of the area.
640*4882a593Smuzhiyun  *
641*4882a593Smuzhiyun  * Cut'n'paste from original memcmp (see lib/string.c)
642*4882a593Smuzhiyun  * with minimal modifications
643*4882a593Smuzhiyun  */
reverse_memcmp(const void * cs,const void * ct,size_t count)644*4882a593Smuzhiyun int reverse_memcmp(const void *cs, const void *ct, size_t count)
645*4882a593Smuzhiyun {
646*4882a593Smuzhiyun 	const unsigned char *su1, *su2;
647*4882a593Smuzhiyun 	int res = 0;
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 	for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
650*4882a593Smuzhiyun 	     --su1, --su2, count--) {
651*4882a593Smuzhiyun 		res = *su1 - *su2;
652*4882a593Smuzhiyun 		if (res)
653*4882a593Smuzhiyun 			break;
654*4882a593Smuzhiyun 	}
655*4882a593Smuzhiyun 	return res;
656*4882a593Smuzhiyun }
657*4882a593Smuzhiyun 
wil_rx_crypto_check(struct wil6210_priv * wil,struct sk_buff * skb)658*4882a593Smuzhiyun static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
659*4882a593Smuzhiyun {
660*4882a593Smuzhiyun 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
661*4882a593Smuzhiyun 	int cid = wil_skb_get_cid(skb);
662*4882a593Smuzhiyun 	int tid = wil_rxdesc_tid(d);
663*4882a593Smuzhiyun 	int key_id = wil_rxdesc_key_id(d);
664*4882a593Smuzhiyun 	int mc = wil_rxdesc_mcast(d);
665*4882a593Smuzhiyun 	struct wil_sta_info *s = &wil->sta[cid];
666*4882a593Smuzhiyun 	struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
667*4882a593Smuzhiyun 				      &s->tid_crypto_rx[tid];
668*4882a593Smuzhiyun 	struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
669*4882a593Smuzhiyun 	const u8 *pn = (u8 *)&d->mac.pn_15_0;
670*4882a593Smuzhiyun 
671*4882a593Smuzhiyun 	if (!cc->key_set) {
672*4882a593Smuzhiyun 		wil_err_ratelimited(wil,
673*4882a593Smuzhiyun 				    "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
674*4882a593Smuzhiyun 				    cid, tid, mc, key_id);
675*4882a593Smuzhiyun 		return -EINVAL;
676*4882a593Smuzhiyun 	}
677*4882a593Smuzhiyun 
678*4882a593Smuzhiyun 	if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
679*4882a593Smuzhiyun 		wil_err_ratelimited(wil,
680*4882a593Smuzhiyun 				    "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
681*4882a593Smuzhiyun 				    cid, tid, mc, key_id, pn, cc->pn);
682*4882a593Smuzhiyun 		return -EINVAL;
683*4882a593Smuzhiyun 	}
684*4882a593Smuzhiyun 	memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	return 0;
687*4882a593Smuzhiyun }
688*4882a593Smuzhiyun 
wil_rx_error_check(struct wil6210_priv * wil,struct sk_buff * skb,struct wil_net_stats * stats)689*4882a593Smuzhiyun static int wil_rx_error_check(struct wil6210_priv *wil, struct sk_buff *skb,
690*4882a593Smuzhiyun 			      struct wil_net_stats *stats)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 	if ((d->dma.status & RX_DMA_STATUS_ERROR) &&
695*4882a593Smuzhiyun 	    (d->dma.error & RX_DMA_ERROR_MIC)) {
696*4882a593Smuzhiyun 		stats->rx_mic_error++;
697*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "MIC error, dropping packet\n");
698*4882a593Smuzhiyun 		return -EFAULT;
699*4882a593Smuzhiyun 	}
700*4882a593Smuzhiyun 
701*4882a593Smuzhiyun 	return 0;
702*4882a593Smuzhiyun }
703*4882a593Smuzhiyun 
wil_get_netif_rx_params(struct sk_buff * skb,int * cid,int * security)704*4882a593Smuzhiyun static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid,
705*4882a593Smuzhiyun 				    int *security)
706*4882a593Smuzhiyun {
707*4882a593Smuzhiyun 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	*cid = wil_skb_get_cid(skb);
710*4882a593Smuzhiyun 	*security = wil_rxdesc_security(d);
711*4882a593Smuzhiyun }
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun /*
714*4882a593Smuzhiyun  * Check if skb is ptk eapol key message
715*4882a593Smuzhiyun  *
716*4882a593Smuzhiyun  * returns a pointer to the start of the eapol key structure, NULL
717*4882a593Smuzhiyun  * if frame is not PTK eapol key
718*4882a593Smuzhiyun  */
wil_is_ptk_eapol_key(struct wil6210_priv * wil,struct sk_buff * skb)719*4882a593Smuzhiyun static struct wil_eapol_key *wil_is_ptk_eapol_key(struct wil6210_priv *wil,
720*4882a593Smuzhiyun 						  struct sk_buff *skb)
721*4882a593Smuzhiyun {
722*4882a593Smuzhiyun 	u8 *buf;
723*4882a593Smuzhiyun 	const struct wil_1x_hdr *hdr;
724*4882a593Smuzhiyun 	struct wil_eapol_key *key;
725*4882a593Smuzhiyun 	u16 key_info;
726*4882a593Smuzhiyun 	int len = skb->len;
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 	if (!skb_mac_header_was_set(skb)) {
729*4882a593Smuzhiyun 		wil_err(wil, "mac header was not set\n");
730*4882a593Smuzhiyun 		return NULL;
731*4882a593Smuzhiyun 	}
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun 	len -= skb_mac_offset(skb);
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	if (len < sizeof(struct ethhdr) + sizeof(struct wil_1x_hdr) +
736*4882a593Smuzhiyun 	    sizeof(struct wil_eapol_key))
737*4882a593Smuzhiyun 		return NULL;
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun 	buf = skb_mac_header(skb) + sizeof(struct ethhdr);
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun 	hdr = (const struct wil_1x_hdr *)buf;
742*4882a593Smuzhiyun 	if (hdr->type != WIL_1X_TYPE_EAPOL_KEY)
743*4882a593Smuzhiyun 		return NULL;
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	key = (struct wil_eapol_key *)(buf + sizeof(struct wil_1x_hdr));
746*4882a593Smuzhiyun 	if (key->type != WIL_EAPOL_KEY_TYPE_WPA &&
747*4882a593Smuzhiyun 	    key->type != WIL_EAPOL_KEY_TYPE_RSN)
748*4882a593Smuzhiyun 		return NULL;
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun 	key_info = be16_to_cpu(key->key_info);
751*4882a593Smuzhiyun 	if (!(key_info & WIL_KEY_INFO_KEY_TYPE)) /* check if pairwise */
752*4882a593Smuzhiyun 		return NULL;
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 	return key;
755*4882a593Smuzhiyun }
756*4882a593Smuzhiyun 
wil_skb_is_eap_3(struct wil6210_priv * wil,struct sk_buff * skb)757*4882a593Smuzhiyun static bool wil_skb_is_eap_3(struct wil6210_priv *wil, struct sk_buff *skb)
758*4882a593Smuzhiyun {
759*4882a593Smuzhiyun 	struct wil_eapol_key *key;
760*4882a593Smuzhiyun 	u16 key_info;
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	key = wil_is_ptk_eapol_key(wil, skb);
763*4882a593Smuzhiyun 	if (!key)
764*4882a593Smuzhiyun 		return false;
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 	key_info = be16_to_cpu(key->key_info);
767*4882a593Smuzhiyun 	if (key_info & (WIL_KEY_INFO_MIC |
768*4882a593Smuzhiyun 			WIL_KEY_INFO_ENCR_KEY_DATA)) {
769*4882a593Smuzhiyun 		/* 3/4 of 4-Way Handshake */
770*4882a593Smuzhiyun 		wil_dbg_misc(wil, "EAPOL key message 3\n");
771*4882a593Smuzhiyun 		return true;
772*4882a593Smuzhiyun 	}
773*4882a593Smuzhiyun 	/* 1/4 of 4-Way Handshake */
774*4882a593Smuzhiyun 	wil_dbg_misc(wil, "EAPOL key message 1\n");
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 	return false;
777*4882a593Smuzhiyun }
778*4882a593Smuzhiyun 
wil_skb_is_eap_4(struct wil6210_priv * wil,struct sk_buff * skb)779*4882a593Smuzhiyun static bool wil_skb_is_eap_4(struct wil6210_priv *wil, struct sk_buff *skb)
780*4882a593Smuzhiyun {
781*4882a593Smuzhiyun 	struct wil_eapol_key *key;
782*4882a593Smuzhiyun 	u32 *nonce, i;
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun 	key = wil_is_ptk_eapol_key(wil, skb);
785*4882a593Smuzhiyun 	if (!key)
786*4882a593Smuzhiyun 		return false;
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun 	nonce = (u32 *)key->key_nonce;
789*4882a593Smuzhiyun 	for (i = 0; i < WIL_EAP_NONCE_LEN / sizeof(u32); i++, nonce++) {
790*4882a593Smuzhiyun 		if (*nonce != 0) {
791*4882a593Smuzhiyun 			/* message 2/4 */
792*4882a593Smuzhiyun 			wil_dbg_misc(wil, "EAPOL key message 2\n");
793*4882a593Smuzhiyun 			return false;
794*4882a593Smuzhiyun 		}
795*4882a593Smuzhiyun 	}
796*4882a593Smuzhiyun 	wil_dbg_misc(wil, "EAPOL key message 4\n");
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 	return true;
799*4882a593Smuzhiyun }
800*4882a593Smuzhiyun 
wil_enable_tx_key_worker(struct work_struct * work)801*4882a593Smuzhiyun void wil_enable_tx_key_worker(struct work_struct *work)
802*4882a593Smuzhiyun {
803*4882a593Smuzhiyun 	struct wil6210_vif *vif = container_of(work,
804*4882a593Smuzhiyun 			struct wil6210_vif, enable_tx_key_worker);
805*4882a593Smuzhiyun 	struct wil6210_priv *wil = vif_to_wil(vif);
806*4882a593Smuzhiyun 	int rc, cid;
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	rtnl_lock();
809*4882a593Smuzhiyun 	if (vif->ptk_rekey_state != WIL_REKEY_WAIT_M4_SENT) {
810*4882a593Smuzhiyun 		wil_dbg_misc(wil, "Invalid rekey state = %d\n",
811*4882a593Smuzhiyun 			     vif->ptk_rekey_state);
812*4882a593Smuzhiyun 		rtnl_unlock();
813*4882a593Smuzhiyun 		return;
814*4882a593Smuzhiyun 	}
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun 	cid =  wil_find_cid_by_idx(wil, vif->mid, 0);
817*4882a593Smuzhiyun 	if (!wil_cid_valid(wil, cid)) {
818*4882a593Smuzhiyun 		wil_err(wil, "Invalid cid = %d\n", cid);
819*4882a593Smuzhiyun 		rtnl_unlock();
820*4882a593Smuzhiyun 		return;
821*4882a593Smuzhiyun 	}
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun 	wil_dbg_misc(wil, "Apply PTK key after eapol was sent out\n");
824*4882a593Smuzhiyun 	rc = wmi_add_cipher_key(vif, 0, wil->sta[cid].addr, 0, NULL,
825*4882a593Smuzhiyun 				WMI_KEY_USE_APPLY_PTK);
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun 	vif->ptk_rekey_state = WIL_REKEY_IDLE;
828*4882a593Smuzhiyun 	rtnl_unlock();
829*4882a593Smuzhiyun 
830*4882a593Smuzhiyun 	if (rc)
831*4882a593Smuzhiyun 		wil_err(wil, "Apply PTK key failed %d\n", rc);
832*4882a593Smuzhiyun }
833*4882a593Smuzhiyun 
wil_tx_complete_handle_eapol(struct wil6210_vif * vif,struct sk_buff * skb)834*4882a593Smuzhiyun void wil_tx_complete_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
835*4882a593Smuzhiyun {
836*4882a593Smuzhiyun 	struct wil6210_priv *wil = vif_to_wil(vif);
837*4882a593Smuzhiyun 	struct wireless_dev *wdev = vif_to_wdev(vif);
838*4882a593Smuzhiyun 	bool q = false;
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	if (wdev->iftype != NL80211_IFTYPE_STATION ||
841*4882a593Smuzhiyun 	    !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
842*4882a593Smuzhiyun 		return;
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	/* check if skb is an EAP message 4/4 */
845*4882a593Smuzhiyun 	if (!wil_skb_is_eap_4(wil, skb))
846*4882a593Smuzhiyun 		return;
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun 	spin_lock_bh(&wil->eap_lock);
849*4882a593Smuzhiyun 	switch (vif->ptk_rekey_state) {
850*4882a593Smuzhiyun 	case WIL_REKEY_IDLE:
851*4882a593Smuzhiyun 		/* ignore idle state, can happen due to M4 retransmission */
852*4882a593Smuzhiyun 		break;
853*4882a593Smuzhiyun 	case WIL_REKEY_M3_RECEIVED:
854*4882a593Smuzhiyun 		vif->ptk_rekey_state = WIL_REKEY_IDLE;
855*4882a593Smuzhiyun 		break;
856*4882a593Smuzhiyun 	case WIL_REKEY_WAIT_M4_SENT:
857*4882a593Smuzhiyun 		q = true;
858*4882a593Smuzhiyun 		break;
859*4882a593Smuzhiyun 	default:
860*4882a593Smuzhiyun 		wil_err(wil, "Unknown rekey state = %d",
861*4882a593Smuzhiyun 			vif->ptk_rekey_state);
862*4882a593Smuzhiyun 	}
863*4882a593Smuzhiyun 	spin_unlock_bh(&wil->eap_lock);
864*4882a593Smuzhiyun 
865*4882a593Smuzhiyun 	if (q) {
866*4882a593Smuzhiyun 		q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker);
867*4882a593Smuzhiyun 		wil_dbg_misc(wil, "queue_work of enable_tx_key_worker -> %d\n",
868*4882a593Smuzhiyun 			     q);
869*4882a593Smuzhiyun 	}
870*4882a593Smuzhiyun }
871*4882a593Smuzhiyun 
wil_rx_handle_eapol(struct wil6210_vif * vif,struct sk_buff * skb)872*4882a593Smuzhiyun static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
873*4882a593Smuzhiyun {
874*4882a593Smuzhiyun 	struct wil6210_priv *wil = vif_to_wil(vif);
875*4882a593Smuzhiyun 	struct wireless_dev *wdev = vif_to_wdev(vif);
876*4882a593Smuzhiyun 
877*4882a593Smuzhiyun 	if (wdev->iftype != NL80211_IFTYPE_STATION ||
878*4882a593Smuzhiyun 	    !test_bit(WMI_FW_CAPABILITY_SPLIT_REKEY, wil->fw_capabilities))
879*4882a593Smuzhiyun 		return;
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	/* check if skb is a EAP message 3/4 */
882*4882a593Smuzhiyun 	if (!wil_skb_is_eap_3(wil, skb))
883*4882a593Smuzhiyun 		return;
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	if (vif->ptk_rekey_state == WIL_REKEY_IDLE)
886*4882a593Smuzhiyun 		vif->ptk_rekey_state = WIL_REKEY_M3_RECEIVED;
887*4882a593Smuzhiyun }
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun /*
890*4882a593Smuzhiyun  * Pass Rx packet to the netif. Update statistics.
891*4882a593Smuzhiyun  * Called in softirq context (NAPI poll).
892*4882a593Smuzhiyun  */
wil_netif_rx(struct sk_buff * skb,struct net_device * ndev,int cid,struct wil_net_stats * stats,bool gro)893*4882a593Smuzhiyun void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
894*4882a593Smuzhiyun 		  struct wil_net_stats *stats, bool gro)
895*4882a593Smuzhiyun {
896*4882a593Smuzhiyun 	struct wil6210_vif *vif = ndev_to_vif(ndev);
897*4882a593Smuzhiyun 	struct wil6210_priv *wil = ndev_to_wil(ndev);
898*4882a593Smuzhiyun 	struct wireless_dev *wdev = vif_to_wdev(vif);
899*4882a593Smuzhiyun 	unsigned int len = skb->len;
900*4882a593Smuzhiyun 	u8 *sa, *da = wil_skb_get_da(skb);
901*4882a593Smuzhiyun 	/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
902*4882a593Smuzhiyun 	 * is not suitable, need to look at data
903*4882a593Smuzhiyun 	 */
904*4882a593Smuzhiyun 	int mcast = is_multicast_ether_addr(da);
905*4882a593Smuzhiyun 	struct sk_buff *xmit_skb = NULL;
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun 	if (wdev->iftype == NL80211_IFTYPE_STATION) {
908*4882a593Smuzhiyun 		sa = wil_skb_get_sa(skb);
909*4882a593Smuzhiyun 		if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
910*4882a593Smuzhiyun 			/* mcast packet looped back to us */
911*4882a593Smuzhiyun 			dev_kfree_skb(skb);
912*4882a593Smuzhiyun 			ndev->stats.rx_dropped++;
913*4882a593Smuzhiyun 			stats->rx_dropped++;
914*4882a593Smuzhiyun 			wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
915*4882a593Smuzhiyun 			return;
916*4882a593Smuzhiyun 		}
917*4882a593Smuzhiyun 	} else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
918*4882a593Smuzhiyun 		if (mcast) {
919*4882a593Smuzhiyun 			/* send multicast frames both to higher layers in
920*4882a593Smuzhiyun 			 * local net stack and back to the wireless medium
921*4882a593Smuzhiyun 			 */
922*4882a593Smuzhiyun 			xmit_skb = skb_copy(skb, GFP_ATOMIC);
923*4882a593Smuzhiyun 		} else {
924*4882a593Smuzhiyun 			int xmit_cid = wil_find_cid(wil, vif->mid, da);
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 			if (xmit_cid >= 0) {
927*4882a593Smuzhiyun 				/* The destination station is associated to
928*4882a593Smuzhiyun 				 * this AP (in this VLAN), so send the frame
929*4882a593Smuzhiyun 				 * directly to it and do not pass it to local
930*4882a593Smuzhiyun 				 * net stack.
931*4882a593Smuzhiyun 				 */
932*4882a593Smuzhiyun 				xmit_skb = skb;
933*4882a593Smuzhiyun 				skb = NULL;
934*4882a593Smuzhiyun 			}
935*4882a593Smuzhiyun 		}
936*4882a593Smuzhiyun 	}
937*4882a593Smuzhiyun 	if (xmit_skb) {
938*4882a593Smuzhiyun 		/* Send to wireless media and increase priority by 256 to
939*4882a593Smuzhiyun 		 * keep the received priority instead of reclassifying
940*4882a593Smuzhiyun 		 * the frame (see cfg80211_classify8021d).
941*4882a593Smuzhiyun 		 */
942*4882a593Smuzhiyun 		xmit_skb->dev = ndev;
943*4882a593Smuzhiyun 		xmit_skb->priority += 256;
944*4882a593Smuzhiyun 		xmit_skb->protocol = htons(ETH_P_802_3);
945*4882a593Smuzhiyun 		skb_reset_network_header(xmit_skb);
946*4882a593Smuzhiyun 		skb_reset_mac_header(xmit_skb);
947*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len);
948*4882a593Smuzhiyun 		dev_queue_xmit(xmit_skb);
949*4882a593Smuzhiyun 	}
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 	if (skb) { /* deliver to local stack */
952*4882a593Smuzhiyun 		skb->protocol = eth_type_trans(skb, ndev);
953*4882a593Smuzhiyun 		skb->dev = ndev;
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
956*4882a593Smuzhiyun 			wil_rx_handle_eapol(vif, skb);
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 		if (gro)
959*4882a593Smuzhiyun 			napi_gro_receive(&wil->napi_rx, skb);
960*4882a593Smuzhiyun 		else
961*4882a593Smuzhiyun 			netif_rx_ni(skb);
962*4882a593Smuzhiyun 	}
963*4882a593Smuzhiyun 	ndev->stats.rx_packets++;
964*4882a593Smuzhiyun 	stats->rx_packets++;
965*4882a593Smuzhiyun 	ndev->stats.rx_bytes += len;
966*4882a593Smuzhiyun 	stats->rx_bytes += len;
967*4882a593Smuzhiyun 	if (mcast)
968*4882a593Smuzhiyun 		ndev->stats.multicast++;
969*4882a593Smuzhiyun }
970*4882a593Smuzhiyun 
wil_netif_rx_any(struct sk_buff * skb,struct net_device * ndev)971*4882a593Smuzhiyun void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
972*4882a593Smuzhiyun {
973*4882a593Smuzhiyun 	int cid, security;
974*4882a593Smuzhiyun 	struct wil6210_priv *wil = ndev_to_wil(ndev);
975*4882a593Smuzhiyun 	struct wil_net_stats *stats;
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun 	wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 	stats = &wil->sta[cid].stats;
980*4882a593Smuzhiyun 
981*4882a593Smuzhiyun 	skb_orphan(skb);
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
984*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
985*4882a593Smuzhiyun 		dev_kfree_skb(skb);
986*4882a593Smuzhiyun 		ndev->stats.rx_dropped++;
987*4882a593Smuzhiyun 		stats->rx_replay++;
988*4882a593Smuzhiyun 		stats->rx_dropped++;
989*4882a593Smuzhiyun 		return;
990*4882a593Smuzhiyun 	}
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun 	/* check errors reported by HW and update statistics */
993*4882a593Smuzhiyun 	if (unlikely(wil->txrx_ops.rx_error_check(wil, skb, stats))) {
994*4882a593Smuzhiyun 		dev_kfree_skb(skb);
995*4882a593Smuzhiyun 		return;
996*4882a593Smuzhiyun 	}
997*4882a593Smuzhiyun 
998*4882a593Smuzhiyun 	wil_netif_rx(skb, ndev, cid, stats, true);
999*4882a593Smuzhiyun }
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun /* Proceed all completed skb's from Rx VRING
1002*4882a593Smuzhiyun  *
1003*4882a593Smuzhiyun  * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
1004*4882a593Smuzhiyun  */
wil_rx_handle(struct wil6210_priv * wil,int * quota)1005*4882a593Smuzhiyun void wil_rx_handle(struct wil6210_priv *wil, int *quota)
1006*4882a593Smuzhiyun {
1007*4882a593Smuzhiyun 	struct net_device *ndev = wil->main_ndev;
1008*4882a593Smuzhiyun 	struct wireless_dev *wdev = ndev->ieee80211_ptr;
1009*4882a593Smuzhiyun 	struct wil_ring *v = &wil->ring_rx;
1010*4882a593Smuzhiyun 	struct sk_buff *skb;
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	if (unlikely(!v->va)) {
1013*4882a593Smuzhiyun 		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
1014*4882a593Smuzhiyun 		return;
1015*4882a593Smuzhiyun 	}
1016*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "rx_handle\n");
1017*4882a593Smuzhiyun 	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
1018*4882a593Smuzhiyun 		(*quota)--;
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun 		/* monitor is currently supported on main interface only */
1021*4882a593Smuzhiyun 		if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
1022*4882a593Smuzhiyun 			skb->dev = ndev;
1023*4882a593Smuzhiyun 			skb_reset_mac_header(skb);
1024*4882a593Smuzhiyun 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1025*4882a593Smuzhiyun 			skb->pkt_type = PACKET_OTHERHOST;
1026*4882a593Smuzhiyun 			skb->protocol = htons(ETH_P_802_2);
1027*4882a593Smuzhiyun 			wil_netif_rx_any(skb, ndev);
1028*4882a593Smuzhiyun 		} else {
1029*4882a593Smuzhiyun 			wil_rx_reorder(wil, skb);
1030*4882a593Smuzhiyun 		}
1031*4882a593Smuzhiyun 	}
1032*4882a593Smuzhiyun 	wil_rx_refill(wil, v->size);
1033*4882a593Smuzhiyun }
1034*4882a593Smuzhiyun 
wil_rx_buf_len_init(struct wil6210_priv * wil)1035*4882a593Smuzhiyun static void wil_rx_buf_len_init(struct wil6210_priv *wil)
1036*4882a593Smuzhiyun {
1037*4882a593Smuzhiyun 	wil->rx_buf_len = rx_large_buf ?
1038*4882a593Smuzhiyun 		WIL_MAX_ETH_MTU : TXRX_BUF_LEN_DEFAULT - WIL_MAX_MPDU_OVERHEAD;
1039*4882a593Smuzhiyun 	if (mtu_max > wil->rx_buf_len) {
1040*4882a593Smuzhiyun 		/* do not allow RX buffers to be smaller than mtu_max, for
1041*4882a593Smuzhiyun 		 * backward compatibility (mtu_max parameter was also used
1042*4882a593Smuzhiyun 		 * to support receiving large packets)
1043*4882a593Smuzhiyun 		 */
1044*4882a593Smuzhiyun 		wil_info(wil, "Override RX buffer to mtu_max(%d)\n", mtu_max);
1045*4882a593Smuzhiyun 		wil->rx_buf_len = mtu_max;
1046*4882a593Smuzhiyun 	}
1047*4882a593Smuzhiyun }
1048*4882a593Smuzhiyun 
wil_rx_init(struct wil6210_priv * wil,uint order)1049*4882a593Smuzhiyun static int wil_rx_init(struct wil6210_priv *wil, uint order)
1050*4882a593Smuzhiyun {
1051*4882a593Smuzhiyun 	struct wil_ring *vring = &wil->ring_rx;
1052*4882a593Smuzhiyun 	int rc;
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	wil_dbg_misc(wil, "rx_init\n");
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	if (vring->va) {
1057*4882a593Smuzhiyun 		wil_err(wil, "Rx ring already allocated\n");
1058*4882a593Smuzhiyun 		return -EINVAL;
1059*4882a593Smuzhiyun 	}
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	wil_rx_buf_len_init(wil);
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	vring->size = 1 << order;
1064*4882a593Smuzhiyun 	vring->is_rx = true;
1065*4882a593Smuzhiyun 	rc = wil_vring_alloc(wil, vring);
1066*4882a593Smuzhiyun 	if (rc)
1067*4882a593Smuzhiyun 		return rc;
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	rc = wmi_rx_chain_add(wil, vring);
1070*4882a593Smuzhiyun 	if (rc)
1071*4882a593Smuzhiyun 		goto err_free;
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	rc = wil_rx_refill(wil, vring->size);
1074*4882a593Smuzhiyun 	if (rc)
1075*4882a593Smuzhiyun 		goto err_free;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	return 0;
1078*4882a593Smuzhiyun  err_free:
1079*4882a593Smuzhiyun 	wil_vring_free(wil, vring);
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun 	return rc;
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun 
wil_rx_fini(struct wil6210_priv * wil)1084*4882a593Smuzhiyun static void wil_rx_fini(struct wil6210_priv *wil)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun 	struct wil_ring *vring = &wil->ring_rx;
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	wil_dbg_misc(wil, "rx_fini\n");
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	if (vring->va)
1091*4882a593Smuzhiyun 		wil_vring_free(wil, vring);
1092*4882a593Smuzhiyun }
1093*4882a593Smuzhiyun 
wil_tx_desc_map(union wil_tx_desc * desc,dma_addr_t pa,u32 len,int vring_index)1094*4882a593Smuzhiyun static int wil_tx_desc_map(union wil_tx_desc *desc, dma_addr_t pa,
1095*4882a593Smuzhiyun 			   u32 len, int vring_index)
1096*4882a593Smuzhiyun {
1097*4882a593Smuzhiyun 	struct vring_tx_desc *d = &desc->legacy;
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	wil_desc_addr_set(&d->dma.addr, pa);
1100*4882a593Smuzhiyun 	d->dma.ip_length = 0;
1101*4882a593Smuzhiyun 	/* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
1102*4882a593Smuzhiyun 	d->dma.b11 = 0/*14 | BIT(7)*/;
1103*4882a593Smuzhiyun 	d->dma.error = 0;
1104*4882a593Smuzhiyun 	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
1105*4882a593Smuzhiyun 	d->dma.length = cpu_to_le16((u16)len);
1106*4882a593Smuzhiyun 	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
1107*4882a593Smuzhiyun 	d->mac.d[0] = 0;
1108*4882a593Smuzhiyun 	d->mac.d[1] = 0;
1109*4882a593Smuzhiyun 	d->mac.d[2] = 0;
1110*4882a593Smuzhiyun 	d->mac.ucode_cmd = 0;
1111*4882a593Smuzhiyun 	/* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi */
1112*4882a593Smuzhiyun 	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
1113*4882a593Smuzhiyun 		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	return 0;
1116*4882a593Smuzhiyun }
1117*4882a593Smuzhiyun 
wil_tx_data_init(struct wil_ring_tx_data * txdata)1118*4882a593Smuzhiyun void wil_tx_data_init(struct wil_ring_tx_data *txdata)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun 	spin_lock_bh(&txdata->lock);
1121*4882a593Smuzhiyun 	txdata->dot1x_open = false;
1122*4882a593Smuzhiyun 	txdata->enabled = 0;
1123*4882a593Smuzhiyun 	txdata->idle = 0;
1124*4882a593Smuzhiyun 	txdata->last_idle = 0;
1125*4882a593Smuzhiyun 	txdata->begin = 0;
1126*4882a593Smuzhiyun 	txdata->agg_wsize = 0;
1127*4882a593Smuzhiyun 	txdata->agg_timeout = 0;
1128*4882a593Smuzhiyun 	txdata->agg_amsdu = 0;
1129*4882a593Smuzhiyun 	txdata->addba_in_progress = false;
1130*4882a593Smuzhiyun 	txdata->mid = U8_MAX;
1131*4882a593Smuzhiyun 	spin_unlock_bh(&txdata->lock);
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun 
wil_vring_init_tx(struct wil6210_vif * vif,int id,int size,int cid,int tid)1134*4882a593Smuzhiyun static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size,
1135*4882a593Smuzhiyun 			     int cid, int tid)
1136*4882a593Smuzhiyun {
1137*4882a593Smuzhiyun 	struct wil6210_priv *wil = vif_to_wil(vif);
1138*4882a593Smuzhiyun 	int rc;
1139*4882a593Smuzhiyun 	struct wmi_vring_cfg_cmd cmd = {
1140*4882a593Smuzhiyun 		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
1141*4882a593Smuzhiyun 		.vring_cfg = {
1142*4882a593Smuzhiyun 			.tx_sw_ring = {
1143*4882a593Smuzhiyun 				.max_mpdu_size =
1144*4882a593Smuzhiyun 					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
1145*4882a593Smuzhiyun 				.ring_size = cpu_to_le16(size),
1146*4882a593Smuzhiyun 			},
1147*4882a593Smuzhiyun 			.ringid = id,
1148*4882a593Smuzhiyun 			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
1149*4882a593Smuzhiyun 			.mac_ctrl = 0,
1150*4882a593Smuzhiyun 			.to_resolution = 0,
1151*4882a593Smuzhiyun 			.agg_max_wsize = 0,
1152*4882a593Smuzhiyun 			.schd_params = {
1153*4882a593Smuzhiyun 				.priority = cpu_to_le16(0),
1154*4882a593Smuzhiyun 				.timeslot_us = cpu_to_le16(0xfff),
1155*4882a593Smuzhiyun 			},
1156*4882a593Smuzhiyun 		},
1157*4882a593Smuzhiyun 	};
1158*4882a593Smuzhiyun 	struct {
1159*4882a593Smuzhiyun 		struct wmi_cmd_hdr wmi;
1160*4882a593Smuzhiyun 		struct wmi_vring_cfg_done_event cmd;
1161*4882a593Smuzhiyun 	} __packed reply = {
1162*4882a593Smuzhiyun 		.cmd = {.status = WMI_FW_STATUS_FAILURE},
1163*4882a593Smuzhiyun 	};
1164*4882a593Smuzhiyun 	struct wil_ring *vring = &wil->ring_tx[id];
1165*4882a593Smuzhiyun 	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	if (cid >= WIL6210_RX_DESC_MAX_CID) {
1168*4882a593Smuzhiyun 		cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID;
1169*4882a593Smuzhiyun 		cmd.vring_cfg.cid = cid;
1170*4882a593Smuzhiyun 		cmd.vring_cfg.tid = tid;
1171*4882a593Smuzhiyun 	} else {
1172*4882a593Smuzhiyun 		cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid);
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
1176*4882a593Smuzhiyun 		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
1177*4882a593Smuzhiyun 	lockdep_assert_held(&wil->mutex);
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	if (vring->va) {
1180*4882a593Smuzhiyun 		wil_err(wil, "Tx ring [%d] already allocated\n", id);
1181*4882a593Smuzhiyun 		rc = -EINVAL;
1182*4882a593Smuzhiyun 		goto out;
1183*4882a593Smuzhiyun 	}
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	wil_tx_data_init(txdata);
1186*4882a593Smuzhiyun 	vring->is_rx = false;
1187*4882a593Smuzhiyun 	vring->size = size;
1188*4882a593Smuzhiyun 	rc = wil_vring_alloc(wil, vring);
1189*4882a593Smuzhiyun 	if (rc)
1190*4882a593Smuzhiyun 		goto out;
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	wil->ring2cid_tid[id][0] = cid;
1193*4882a593Smuzhiyun 	wil->ring2cid_tid[id][1] = tid;
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	if (!vif->privacy)
1198*4882a593Smuzhiyun 		txdata->dot1x_open = true;
1199*4882a593Smuzhiyun 	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
1200*4882a593Smuzhiyun 		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
1201*4882a593Smuzhiyun 		      WIL_WMI_CALL_GENERAL_TO_MS);
1202*4882a593Smuzhiyun 	if (rc)
1203*4882a593Smuzhiyun 		goto out_free;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1206*4882a593Smuzhiyun 		wil_err(wil, "Tx config failed, status 0x%02x\n",
1207*4882a593Smuzhiyun 			reply.cmd.status);
1208*4882a593Smuzhiyun 		rc = -EINVAL;
1209*4882a593Smuzhiyun 		goto out_free;
1210*4882a593Smuzhiyun 	}
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun 	spin_lock_bh(&txdata->lock);
1213*4882a593Smuzhiyun 	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1214*4882a593Smuzhiyun 	txdata->mid = vif->mid;
1215*4882a593Smuzhiyun 	txdata->enabled = 1;
1216*4882a593Smuzhiyun 	spin_unlock_bh(&txdata->lock);
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	if (txdata->dot1x_open && (agg_wsize >= 0))
1219*4882a593Smuzhiyun 		wil_addba_tx_request(wil, id, agg_wsize);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	return 0;
1222*4882a593Smuzhiyun  out_free:
1223*4882a593Smuzhiyun 	spin_lock_bh(&txdata->lock);
1224*4882a593Smuzhiyun 	txdata->dot1x_open = false;
1225*4882a593Smuzhiyun 	txdata->enabled = 0;
1226*4882a593Smuzhiyun 	spin_unlock_bh(&txdata->lock);
1227*4882a593Smuzhiyun 	wil_vring_free(wil, vring);
1228*4882a593Smuzhiyun 	wil->ring2cid_tid[id][0] = wil->max_assoc_sta;
1229*4882a593Smuzhiyun 	wil->ring2cid_tid[id][1] = 0;
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun  out:
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	return rc;
1234*4882a593Smuzhiyun }
1235*4882a593Smuzhiyun 
wil_tx_vring_modify(struct wil6210_vif * vif,int ring_id,int cid,int tid)1236*4882a593Smuzhiyun static int wil_tx_vring_modify(struct wil6210_vif *vif, int ring_id, int cid,
1237*4882a593Smuzhiyun 			       int tid)
1238*4882a593Smuzhiyun {
1239*4882a593Smuzhiyun 	struct wil6210_priv *wil = vif_to_wil(vif);
1240*4882a593Smuzhiyun 	int rc;
1241*4882a593Smuzhiyun 	struct wmi_vring_cfg_cmd cmd = {
1242*4882a593Smuzhiyun 		.action = cpu_to_le32(WMI_VRING_CMD_MODIFY),
1243*4882a593Smuzhiyun 		.vring_cfg = {
1244*4882a593Smuzhiyun 			.tx_sw_ring = {
1245*4882a593Smuzhiyun 				.max_mpdu_size =
1246*4882a593Smuzhiyun 					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
1247*4882a593Smuzhiyun 				.ring_size = 0,
1248*4882a593Smuzhiyun 			},
1249*4882a593Smuzhiyun 			.ringid = ring_id,
1250*4882a593Smuzhiyun 			.cidxtid = mk_cidxtid(cid, tid),
1251*4882a593Smuzhiyun 			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
1252*4882a593Smuzhiyun 			.mac_ctrl = 0,
1253*4882a593Smuzhiyun 			.to_resolution = 0,
1254*4882a593Smuzhiyun 			.agg_max_wsize = 0,
1255*4882a593Smuzhiyun 			.schd_params = {
1256*4882a593Smuzhiyun 				.priority = cpu_to_le16(0),
1257*4882a593Smuzhiyun 				.timeslot_us = cpu_to_le16(0xfff),
1258*4882a593Smuzhiyun 			},
1259*4882a593Smuzhiyun 		},
1260*4882a593Smuzhiyun 	};
1261*4882a593Smuzhiyun 	struct {
1262*4882a593Smuzhiyun 		struct wmi_cmd_hdr wmi;
1263*4882a593Smuzhiyun 		struct wmi_vring_cfg_done_event cmd;
1264*4882a593Smuzhiyun 	} __packed reply = {
1265*4882a593Smuzhiyun 		.cmd = {.status = WMI_FW_STATUS_FAILURE},
1266*4882a593Smuzhiyun 	};
1267*4882a593Smuzhiyun 	struct wil_ring *vring = &wil->ring_tx[ring_id];
1268*4882a593Smuzhiyun 	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_id];
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	wil_dbg_misc(wil, "vring_modify: ring %d cid %d tid %d\n", ring_id,
1271*4882a593Smuzhiyun 		     cid, tid);
1272*4882a593Smuzhiyun 	lockdep_assert_held(&wil->mutex);
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	if (!vring->va) {
1275*4882a593Smuzhiyun 		wil_err(wil, "Tx ring [%d] not allocated\n", ring_id);
1276*4882a593Smuzhiyun 		return -EINVAL;
1277*4882a593Smuzhiyun 	}
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	if (wil->ring2cid_tid[ring_id][0] != cid ||
1280*4882a593Smuzhiyun 	    wil->ring2cid_tid[ring_id][1] != tid) {
1281*4882a593Smuzhiyun 		wil_err(wil, "ring info does not match cid=%u tid=%u\n",
1282*4882a593Smuzhiyun 			wil->ring2cid_tid[ring_id][0],
1283*4882a593Smuzhiyun 			wil->ring2cid_tid[ring_id][1]);
1284*4882a593Smuzhiyun 	}
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, vif->mid, &cmd, sizeof(cmd),
1289*4882a593Smuzhiyun 		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
1290*4882a593Smuzhiyun 		      WIL_WMI_CALL_GENERAL_TO_MS);
1291*4882a593Smuzhiyun 	if (rc)
1292*4882a593Smuzhiyun 		goto fail;
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1295*4882a593Smuzhiyun 		wil_err(wil, "Tx modify failed, status 0x%02x\n",
1296*4882a593Smuzhiyun 			reply.cmd.status);
1297*4882a593Smuzhiyun 		rc = -EINVAL;
1298*4882a593Smuzhiyun 		goto fail;
1299*4882a593Smuzhiyun 	}
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	/* set BA aggregation window size to 0 to force a new BA with the
1302*4882a593Smuzhiyun 	 * new AP
1303*4882a593Smuzhiyun 	 */
1304*4882a593Smuzhiyun 	txdata->agg_wsize = 0;
1305*4882a593Smuzhiyun 	if (txdata->dot1x_open && agg_wsize >= 0)
1306*4882a593Smuzhiyun 		wil_addba_tx_request(wil, ring_id, agg_wsize);
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	return 0;
1309*4882a593Smuzhiyun fail:
1310*4882a593Smuzhiyun 	spin_lock_bh(&txdata->lock);
1311*4882a593Smuzhiyun 	txdata->dot1x_open = false;
1312*4882a593Smuzhiyun 	txdata->enabled = 0;
1313*4882a593Smuzhiyun 	spin_unlock_bh(&txdata->lock);
1314*4882a593Smuzhiyun 	wil->ring2cid_tid[ring_id][0] = wil->max_assoc_sta;
1315*4882a593Smuzhiyun 	wil->ring2cid_tid[ring_id][1] = 0;
1316*4882a593Smuzhiyun 	return rc;
1317*4882a593Smuzhiyun }
1318*4882a593Smuzhiyun 
wil_vring_init_bcast(struct wil6210_vif * vif,int id,int size)1319*4882a593Smuzhiyun int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size)
1320*4882a593Smuzhiyun {
1321*4882a593Smuzhiyun 	struct wil6210_priv *wil = vif_to_wil(vif);
1322*4882a593Smuzhiyun 	int rc;
1323*4882a593Smuzhiyun 	struct wmi_bcast_vring_cfg_cmd cmd = {
1324*4882a593Smuzhiyun 		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
1325*4882a593Smuzhiyun 		.vring_cfg = {
1326*4882a593Smuzhiyun 			.tx_sw_ring = {
1327*4882a593Smuzhiyun 				.max_mpdu_size =
1328*4882a593Smuzhiyun 					cpu_to_le16(wil_mtu2macbuf(mtu_max)),
1329*4882a593Smuzhiyun 				.ring_size = cpu_to_le16(size),
1330*4882a593Smuzhiyun 			},
1331*4882a593Smuzhiyun 			.ringid = id,
1332*4882a593Smuzhiyun 			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
1333*4882a593Smuzhiyun 		},
1334*4882a593Smuzhiyun 	};
1335*4882a593Smuzhiyun 	struct {
1336*4882a593Smuzhiyun 		struct wmi_cmd_hdr wmi;
1337*4882a593Smuzhiyun 		struct wmi_vring_cfg_done_event cmd;
1338*4882a593Smuzhiyun 	} __packed reply = {
1339*4882a593Smuzhiyun 		.cmd = {.status = WMI_FW_STATUS_FAILURE},
1340*4882a593Smuzhiyun 	};
1341*4882a593Smuzhiyun 	struct wil_ring *vring = &wil->ring_tx[id];
1342*4882a593Smuzhiyun 	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id];
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 	wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
1345*4882a593Smuzhiyun 		     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
1346*4882a593Smuzhiyun 	lockdep_assert_held(&wil->mutex);
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	if (vring->va) {
1349*4882a593Smuzhiyun 		wil_err(wil, "Tx ring [%d] already allocated\n", id);
1350*4882a593Smuzhiyun 		rc = -EINVAL;
1351*4882a593Smuzhiyun 		goto out;
1352*4882a593Smuzhiyun 	}
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	wil_tx_data_init(txdata);
1355*4882a593Smuzhiyun 	vring->is_rx = false;
1356*4882a593Smuzhiyun 	vring->size = size;
1357*4882a593Smuzhiyun 	rc = wil_vring_alloc(wil, vring);
1358*4882a593Smuzhiyun 	if (rc)
1359*4882a593Smuzhiyun 		goto out;
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	wil->ring2cid_tid[id][0] = wil->max_assoc_sta; /* CID */
1362*4882a593Smuzhiyun 	wil->ring2cid_tid[id][1] = 0; /* TID */
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 	if (!vif->privacy)
1367*4882a593Smuzhiyun 		txdata->dot1x_open = true;
1368*4882a593Smuzhiyun 	rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, vif->mid,
1369*4882a593Smuzhiyun 		      &cmd, sizeof(cmd),
1370*4882a593Smuzhiyun 		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply),
1371*4882a593Smuzhiyun 		      WIL_WMI_CALL_GENERAL_TO_MS);
1372*4882a593Smuzhiyun 	if (rc)
1373*4882a593Smuzhiyun 		goto out_free;
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
1376*4882a593Smuzhiyun 		wil_err(wil, "Tx config failed, status 0x%02x\n",
1377*4882a593Smuzhiyun 			reply.cmd.status);
1378*4882a593Smuzhiyun 		rc = -EINVAL;
1379*4882a593Smuzhiyun 		goto out_free;
1380*4882a593Smuzhiyun 	}
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	spin_lock_bh(&txdata->lock);
1383*4882a593Smuzhiyun 	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
1384*4882a593Smuzhiyun 	txdata->mid = vif->mid;
1385*4882a593Smuzhiyun 	txdata->enabled = 1;
1386*4882a593Smuzhiyun 	spin_unlock_bh(&txdata->lock);
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	return 0;
1389*4882a593Smuzhiyun  out_free:
1390*4882a593Smuzhiyun 	spin_lock_bh(&txdata->lock);
1391*4882a593Smuzhiyun 	txdata->enabled = 0;
1392*4882a593Smuzhiyun 	txdata->dot1x_open = false;
1393*4882a593Smuzhiyun 	spin_unlock_bh(&txdata->lock);
1394*4882a593Smuzhiyun 	wil_vring_free(wil, vring);
1395*4882a593Smuzhiyun  out:
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 	return rc;
1398*4882a593Smuzhiyun }
1399*4882a593Smuzhiyun 
wil_find_tx_ucast(struct wil6210_priv * wil,struct wil6210_vif * vif,struct sk_buff * skb)1400*4882a593Smuzhiyun static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil,
1401*4882a593Smuzhiyun 					  struct wil6210_vif *vif,
1402*4882a593Smuzhiyun 					  struct sk_buff *skb)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun 	int i, cid;
1405*4882a593Smuzhiyun 	const u8 *da = wil_skb_get_da(skb);
1406*4882a593Smuzhiyun 	int min_ring_id = wil_get_min_tx_ring_id(wil);
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	cid = wil_find_cid(wil, vif->mid, da);
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	if (cid < 0 || cid >= wil->max_assoc_sta)
1411*4882a593Smuzhiyun 		return NULL;
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	/* TODO: fix for multiple TID */
1414*4882a593Smuzhiyun 	for (i = min_ring_id; i < ARRAY_SIZE(wil->ring2cid_tid); i++) {
1415*4882a593Smuzhiyun 		if (!wil->ring_tx_data[i].dot1x_open &&
1416*4882a593Smuzhiyun 		    skb->protocol != cpu_to_be16(ETH_P_PAE))
1417*4882a593Smuzhiyun 			continue;
1418*4882a593Smuzhiyun 		if (wil->ring2cid_tid[i][0] == cid) {
1419*4882a593Smuzhiyun 			struct wil_ring *v = &wil->ring_tx[i];
1420*4882a593Smuzhiyun 			struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i];
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 			wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
1423*4882a593Smuzhiyun 				     da, i);
1424*4882a593Smuzhiyun 			if (v->va && txdata->enabled) {
1425*4882a593Smuzhiyun 				return v;
1426*4882a593Smuzhiyun 			} else {
1427*4882a593Smuzhiyun 				wil_dbg_txrx(wil,
1428*4882a593Smuzhiyun 					     "find_tx_ucast: vring[%d] not valid\n",
1429*4882a593Smuzhiyun 					     i);
1430*4882a593Smuzhiyun 				return NULL;
1431*4882a593Smuzhiyun 			}
1432*4882a593Smuzhiyun 		}
1433*4882a593Smuzhiyun 	}
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	return NULL;
1436*4882a593Smuzhiyun }
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
1439*4882a593Smuzhiyun 		       struct wil_ring *ring, struct sk_buff *skb);
1440*4882a593Smuzhiyun 
wil_find_tx_ring_sta(struct wil6210_priv * wil,struct wil6210_vif * vif,struct sk_buff * skb)1441*4882a593Smuzhiyun static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil,
1442*4882a593Smuzhiyun 					     struct wil6210_vif *vif,
1443*4882a593Smuzhiyun 					     struct sk_buff *skb)
1444*4882a593Smuzhiyun {
1445*4882a593Smuzhiyun 	struct wil_ring *ring;
1446*4882a593Smuzhiyun 	int i;
1447*4882a593Smuzhiyun 	u8 cid;
1448*4882a593Smuzhiyun 	struct wil_ring_tx_data  *txdata;
1449*4882a593Smuzhiyun 	int min_ring_id = wil_get_min_tx_ring_id(wil);
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	/* In the STA mode, it is expected to have only 1 VRING
1452*4882a593Smuzhiyun 	 * for the AP we connected to.
1453*4882a593Smuzhiyun 	 * find 1-st vring eligible for this skb and use it.
1454*4882a593Smuzhiyun 	 */
1455*4882a593Smuzhiyun 	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
1456*4882a593Smuzhiyun 		ring = &wil->ring_tx[i];
1457*4882a593Smuzhiyun 		txdata = &wil->ring_tx_data[i];
1458*4882a593Smuzhiyun 		if (!ring->va || !txdata->enabled || txdata->mid != vif->mid)
1459*4882a593Smuzhiyun 			continue;
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 		cid = wil->ring2cid_tid[i][0];
1462*4882a593Smuzhiyun 		if (cid >= wil->max_assoc_sta) /* skip BCAST */
1463*4882a593Smuzhiyun 			continue;
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 		if (!wil->ring_tx_data[i].dot1x_open &&
1466*4882a593Smuzhiyun 		    skb->protocol != cpu_to_be16(ETH_P_PAE))
1467*4882a593Smuzhiyun 			continue;
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 		return ring;
1472*4882a593Smuzhiyun 	}
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "Tx while no rings active?\n");
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	return NULL;
1477*4882a593Smuzhiyun }
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun /* Use one of 2 strategies:
1480*4882a593Smuzhiyun  *
1481*4882a593Smuzhiyun  * 1. New (real broadcast):
1482*4882a593Smuzhiyun  *    use dedicated broadcast vring
1483*4882a593Smuzhiyun  * 2. Old (pseudo-DMS):
1484*4882a593Smuzhiyun  *    Find 1-st vring and return it;
1485*4882a593Smuzhiyun  *    duplicate skb and send it to other active vrings;
1486*4882a593Smuzhiyun  *    in all cases override dest address to unicast peer's address
1487*4882a593Smuzhiyun  * Use old strategy when new is not supported yet:
1488*4882a593Smuzhiyun  *  - for PBSS
1489*4882a593Smuzhiyun  */
wil_find_tx_bcast_1(struct wil6210_priv * wil,struct wil6210_vif * vif,struct sk_buff * skb)1490*4882a593Smuzhiyun static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
1491*4882a593Smuzhiyun 					    struct wil6210_vif *vif,
1492*4882a593Smuzhiyun 					    struct sk_buff *skb)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun 	struct wil_ring *v;
1495*4882a593Smuzhiyun 	struct wil_ring_tx_data *txdata;
1496*4882a593Smuzhiyun 	int i = vif->bcast_ring;
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	if (i < 0)
1499*4882a593Smuzhiyun 		return NULL;
1500*4882a593Smuzhiyun 	v = &wil->ring_tx[i];
1501*4882a593Smuzhiyun 	txdata = &wil->ring_tx_data[i];
1502*4882a593Smuzhiyun 	if (!v->va || !txdata->enabled)
1503*4882a593Smuzhiyun 		return NULL;
1504*4882a593Smuzhiyun 	if (!wil->ring_tx_data[i].dot1x_open &&
1505*4882a593Smuzhiyun 	    skb->protocol != cpu_to_be16(ETH_P_PAE))
1506*4882a593Smuzhiyun 		return NULL;
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	return v;
1509*4882a593Smuzhiyun }
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun /* apply multicast to unicast only for ARP and IP packets
1512*4882a593Smuzhiyun  * (see NL80211_CMD_SET_MULTICAST_TO_UNICAST for more info)
1513*4882a593Smuzhiyun  */
wil_check_multicast_to_unicast(struct wil6210_priv * wil,struct sk_buff * skb)1514*4882a593Smuzhiyun static bool wil_check_multicast_to_unicast(struct wil6210_priv *wil,
1515*4882a593Smuzhiyun 					   struct sk_buff *skb)
1516*4882a593Smuzhiyun {
1517*4882a593Smuzhiyun 	const struct ethhdr *eth = (void *)skb->data;
1518*4882a593Smuzhiyun 	const struct vlan_ethhdr *ethvlan = (void *)skb->data;
1519*4882a593Smuzhiyun 	__be16 ethertype;
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	if (!wil->multicast_to_unicast)
1522*4882a593Smuzhiyun 		return false;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	/* multicast to unicast conversion only for some payload */
1525*4882a593Smuzhiyun 	ethertype = eth->h_proto;
1526*4882a593Smuzhiyun 	if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
1527*4882a593Smuzhiyun 		ethertype = ethvlan->h_vlan_encapsulated_proto;
1528*4882a593Smuzhiyun 	switch (ethertype) {
1529*4882a593Smuzhiyun 	case htons(ETH_P_ARP):
1530*4882a593Smuzhiyun 	case htons(ETH_P_IP):
1531*4882a593Smuzhiyun 	case htons(ETH_P_IPV6):
1532*4882a593Smuzhiyun 		break;
1533*4882a593Smuzhiyun 	default:
1534*4882a593Smuzhiyun 		return false;
1535*4882a593Smuzhiyun 	}
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	return true;
1538*4882a593Smuzhiyun }
1539*4882a593Smuzhiyun 
wil_set_da_for_vring(struct wil6210_priv * wil,struct sk_buff * skb,int vring_index)1540*4882a593Smuzhiyun static void wil_set_da_for_vring(struct wil6210_priv *wil,
1541*4882a593Smuzhiyun 				 struct sk_buff *skb, int vring_index)
1542*4882a593Smuzhiyun {
1543*4882a593Smuzhiyun 	u8 *da = wil_skb_get_da(skb);
1544*4882a593Smuzhiyun 	int cid = wil->ring2cid_tid[vring_index][0];
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	ether_addr_copy(da, wil->sta[cid].addr);
1547*4882a593Smuzhiyun }
1548*4882a593Smuzhiyun 
wil_find_tx_bcast_2(struct wil6210_priv * wil,struct wil6210_vif * vif,struct sk_buff * skb)1549*4882a593Smuzhiyun static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
1550*4882a593Smuzhiyun 					    struct wil6210_vif *vif,
1551*4882a593Smuzhiyun 					    struct sk_buff *skb)
1552*4882a593Smuzhiyun {
1553*4882a593Smuzhiyun 	struct wil_ring *v, *v2;
1554*4882a593Smuzhiyun 	struct sk_buff *skb2;
1555*4882a593Smuzhiyun 	int i;
1556*4882a593Smuzhiyun 	u8 cid;
1557*4882a593Smuzhiyun 	const u8 *src = wil_skb_get_sa(skb);
1558*4882a593Smuzhiyun 	struct wil_ring_tx_data *txdata, *txdata2;
1559*4882a593Smuzhiyun 	int min_ring_id = wil_get_min_tx_ring_id(wil);
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	/* find 1-st vring eligible for data */
1562*4882a593Smuzhiyun 	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
1563*4882a593Smuzhiyun 		v = &wil->ring_tx[i];
1564*4882a593Smuzhiyun 		txdata = &wil->ring_tx_data[i];
1565*4882a593Smuzhiyun 		if (!v->va || !txdata->enabled || txdata->mid != vif->mid)
1566*4882a593Smuzhiyun 			continue;
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 		cid = wil->ring2cid_tid[i][0];
1569*4882a593Smuzhiyun 		if (cid >= wil->max_assoc_sta) /* skip BCAST */
1570*4882a593Smuzhiyun 			continue;
1571*4882a593Smuzhiyun 		if (!wil->ring_tx_data[i].dot1x_open &&
1572*4882a593Smuzhiyun 		    skb->protocol != cpu_to_be16(ETH_P_PAE))
1573*4882a593Smuzhiyun 			continue;
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 		/* don't Tx back to source when re-routing Rx->Tx at the AP */
1576*4882a593Smuzhiyun 		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
1577*4882a593Smuzhiyun 			continue;
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 		goto found;
1580*4882a593Smuzhiyun 	}
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "Tx while no vrings active?\n");
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	return NULL;
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun found:
1587*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
1588*4882a593Smuzhiyun 	wil_set_da_for_vring(wil, skb, i);
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun 	/* find other active vrings and duplicate skb for each */
1591*4882a593Smuzhiyun 	for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
1592*4882a593Smuzhiyun 		v2 = &wil->ring_tx[i];
1593*4882a593Smuzhiyun 		txdata2 = &wil->ring_tx_data[i];
1594*4882a593Smuzhiyun 		if (!v2->va || txdata2->mid != vif->mid)
1595*4882a593Smuzhiyun 			continue;
1596*4882a593Smuzhiyun 		cid = wil->ring2cid_tid[i][0];
1597*4882a593Smuzhiyun 		if (cid >= wil->max_assoc_sta) /* skip BCAST */
1598*4882a593Smuzhiyun 			continue;
1599*4882a593Smuzhiyun 		if (!wil->ring_tx_data[i].dot1x_open &&
1600*4882a593Smuzhiyun 		    skb->protocol != cpu_to_be16(ETH_P_PAE))
1601*4882a593Smuzhiyun 			continue;
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 		if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
1604*4882a593Smuzhiyun 			continue;
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 		skb2 = skb_copy(skb, GFP_ATOMIC);
1607*4882a593Smuzhiyun 		if (skb2) {
1608*4882a593Smuzhiyun 			wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
1609*4882a593Smuzhiyun 			wil_set_da_for_vring(wil, skb2, i);
1610*4882a593Smuzhiyun 			wil_tx_ring(wil, vif, v2, skb2);
1611*4882a593Smuzhiyun 			/* successful call to wil_tx_ring takes skb2 ref */
1612*4882a593Smuzhiyun 			dev_kfree_skb_any(skb2);
1613*4882a593Smuzhiyun 		} else {
1614*4882a593Smuzhiyun 			wil_err(wil, "skb_copy failed\n");
1615*4882a593Smuzhiyun 		}
1616*4882a593Smuzhiyun 	}
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	return v;
1619*4882a593Smuzhiyun }
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun static inline
wil_tx_desc_set_nr_frags(struct vring_tx_desc * d,int nr_frags)1622*4882a593Smuzhiyun void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
1623*4882a593Smuzhiyun {
1624*4882a593Smuzhiyun 	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
1625*4882a593Smuzhiyun }
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun /* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
1628*4882a593Smuzhiyun  * @skb is used to obtain the protocol and headers length.
1629*4882a593Smuzhiyun  * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
1630*4882a593Smuzhiyun  * 2 - middle, 3 - last descriptor.
1631*4882a593Smuzhiyun  */
1632*4882a593Smuzhiyun 
wil_tx_desc_offload_setup_tso(struct vring_tx_desc * d,struct sk_buff * skb,int tso_desc_type,bool is_ipv4,int tcp_hdr_len,int skb_net_hdr_len)1633*4882a593Smuzhiyun static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
1634*4882a593Smuzhiyun 					  struct sk_buff *skb,
1635*4882a593Smuzhiyun 					  int tso_desc_type, bool is_ipv4,
1636*4882a593Smuzhiyun 					  int tcp_hdr_len, int skb_net_hdr_len)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun 	d->dma.b11 = ETH_HLEN; /* MAC header length */
1639*4882a593Smuzhiyun 	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun 	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
1642*4882a593Smuzhiyun 	/* L4 header len: TCP header length */
1643*4882a593Smuzhiyun 	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	/* Setup TSO: bit and desc type */
1646*4882a593Smuzhiyun 	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
1647*4882a593Smuzhiyun 		(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
1648*4882a593Smuzhiyun 	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	d->dma.ip_length = skb_net_hdr_len;
1651*4882a593Smuzhiyun 	/* Enable TCP/UDP checksum */
1652*4882a593Smuzhiyun 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
1653*4882a593Smuzhiyun 	/* Calculate pseudo-header */
1654*4882a593Smuzhiyun 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
1655*4882a593Smuzhiyun }
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun /* Sets the descriptor @d up for csum. The corresponding
1658*4882a593Smuzhiyun  * @skb is used to obtain the protocol and headers length.
1659*4882a593Smuzhiyun  * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
1660*4882a593Smuzhiyun  * Note, if d==NULL, the function only returns the protocol result.
1661*4882a593Smuzhiyun  *
1662*4882a593Smuzhiyun  * It is very similar to previous wil_tx_desc_offload_setup_tso. This
1663*4882a593Smuzhiyun  * is "if unrolling" to optimize the critical path.
1664*4882a593Smuzhiyun  */
1665*4882a593Smuzhiyun 
wil_tx_desc_offload_setup(struct vring_tx_desc * d,struct sk_buff * skb)1666*4882a593Smuzhiyun static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
1667*4882a593Smuzhiyun 				     struct sk_buff *skb){
1668*4882a593Smuzhiyun 	int protocol;
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1671*4882a593Smuzhiyun 		return 0;
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	d->dma.b11 = ETH_HLEN; /* MAC header length */
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 	switch (skb->protocol) {
1676*4882a593Smuzhiyun 	case cpu_to_be16(ETH_P_IP):
1677*4882a593Smuzhiyun 		protocol = ip_hdr(skb)->protocol;
1678*4882a593Smuzhiyun 		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
1679*4882a593Smuzhiyun 		break;
1680*4882a593Smuzhiyun 	case cpu_to_be16(ETH_P_IPV6):
1681*4882a593Smuzhiyun 		protocol = ipv6_hdr(skb)->nexthdr;
1682*4882a593Smuzhiyun 		break;
1683*4882a593Smuzhiyun 	default:
1684*4882a593Smuzhiyun 		return -EINVAL;
1685*4882a593Smuzhiyun 	}
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun 	switch (protocol) {
1688*4882a593Smuzhiyun 	case IPPROTO_TCP:
1689*4882a593Smuzhiyun 		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
1690*4882a593Smuzhiyun 		/* L4 header len: TCP header length */
1691*4882a593Smuzhiyun 		d->dma.d0 |=
1692*4882a593Smuzhiyun 		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1693*4882a593Smuzhiyun 		break;
1694*4882a593Smuzhiyun 	case IPPROTO_UDP:
1695*4882a593Smuzhiyun 		/* L4 header len: UDP header length */
1696*4882a593Smuzhiyun 		d->dma.d0 |=
1697*4882a593Smuzhiyun 		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
1698*4882a593Smuzhiyun 		break;
1699*4882a593Smuzhiyun 	default:
1700*4882a593Smuzhiyun 		return -EINVAL;
1701*4882a593Smuzhiyun 	}
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	d->dma.ip_length = skb_network_header_len(skb);
1704*4882a593Smuzhiyun 	/* Enable TCP/UDP checksum */
1705*4882a593Smuzhiyun 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
1706*4882a593Smuzhiyun 	/* Calculate pseudo-header */
1707*4882a593Smuzhiyun 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	return 0;
1710*4882a593Smuzhiyun }
1711*4882a593Smuzhiyun 
wil_tx_last_desc(struct vring_tx_desc * d)1712*4882a593Smuzhiyun static inline void wil_tx_last_desc(struct vring_tx_desc *d)
1713*4882a593Smuzhiyun {
1714*4882a593Smuzhiyun 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
1715*4882a593Smuzhiyun 	      BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
1716*4882a593Smuzhiyun 	      BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
1717*4882a593Smuzhiyun }
1718*4882a593Smuzhiyun 
wil_set_tx_desc_last_tso(volatile struct vring_tx_desc * d)1719*4882a593Smuzhiyun static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
1720*4882a593Smuzhiyun {
1721*4882a593Smuzhiyun 	d->dma.d0 |= wil_tso_type_lst <<
1722*4882a593Smuzhiyun 		  DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
1723*4882a593Smuzhiyun }
1724*4882a593Smuzhiyun 
__wil_tx_vring_tso(struct wil6210_priv * wil,struct wil6210_vif * vif,struct wil_ring * vring,struct sk_buff * skb)1725*4882a593Smuzhiyun static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
1726*4882a593Smuzhiyun 			      struct wil_ring *vring, struct sk_buff *skb)
1727*4882a593Smuzhiyun {
1728*4882a593Smuzhiyun 	struct device *dev = wil_to_dev(wil);
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 	/* point to descriptors in shared memory */
1731*4882a593Smuzhiyun 	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
1732*4882a593Smuzhiyun 				      *_first_desc = NULL;
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	/* pointers to shadow descriptors */
1735*4882a593Smuzhiyun 	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
1736*4882a593Smuzhiyun 			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
1737*4882a593Smuzhiyun 			     *first_desc = &first_desc_mem;
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	/* pointer to shadow descriptors' context */
1740*4882a593Smuzhiyun 	struct wil_ctx *hdr_ctx, *first_ctx = NULL;
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 	int descs_used = 0; /* total number of used descriptors */
1743*4882a593Smuzhiyun 	int sg_desc_cnt = 0; /* number of descriptors for current mss*/
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	u32 swhead = vring->swhead;
1746*4882a593Smuzhiyun 	int used, avail = wil_ring_avail_tx(vring);
1747*4882a593Smuzhiyun 	int nr_frags = skb_shinfo(skb)->nr_frags;
1748*4882a593Smuzhiyun 	int min_desc_required = nr_frags + 1;
1749*4882a593Smuzhiyun 	int mss = skb_shinfo(skb)->gso_size;	/* payload size w/o headers */
1750*4882a593Smuzhiyun 	int f, len, hdrlen, headlen;
1751*4882a593Smuzhiyun 	int vring_index = vring - wil->ring_tx;
1752*4882a593Smuzhiyun 	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[vring_index];
1753*4882a593Smuzhiyun 	uint i = swhead;
1754*4882a593Smuzhiyun 	dma_addr_t pa;
1755*4882a593Smuzhiyun 	const skb_frag_t *frag = NULL;
1756*4882a593Smuzhiyun 	int rem_data = mss;
1757*4882a593Smuzhiyun 	int lenmss;
1758*4882a593Smuzhiyun 	int hdr_compensation_need = true;
1759*4882a593Smuzhiyun 	int desc_tso_type = wil_tso_type_first;
1760*4882a593Smuzhiyun 	bool is_ipv4;
1761*4882a593Smuzhiyun 	int tcp_hdr_len;
1762*4882a593Smuzhiyun 	int skb_net_hdr_len;
1763*4882a593Smuzhiyun 	int gso_type;
1764*4882a593Smuzhiyun 	int rc = -EINVAL;
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
1767*4882a593Smuzhiyun 		     vring_index);
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 	if (unlikely(!txdata->enabled))
1770*4882a593Smuzhiyun 		return -EINVAL;
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	/* A typical page 4K is 3-4 payloads, we assume each fragment
1773*4882a593Smuzhiyun 	 * is a full payload, that's how min_desc_required has been
1774*4882a593Smuzhiyun 	 * calculated. In real we might need more or less descriptors,
1775*4882a593Smuzhiyun 	 * this is the initial check only.
1776*4882a593Smuzhiyun 	 */
1777*4882a593Smuzhiyun 	if (unlikely(avail < min_desc_required)) {
1778*4882a593Smuzhiyun 		wil_err_ratelimited(wil,
1779*4882a593Smuzhiyun 				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
1780*4882a593Smuzhiyun 				    vring_index, min_desc_required);
1781*4882a593Smuzhiyun 		return -ENOMEM;
1782*4882a593Smuzhiyun 	}
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 	/* Header Length = MAC header len + IP header len + TCP header len*/
1785*4882a593Smuzhiyun 	hdrlen = ETH_HLEN +
1786*4882a593Smuzhiyun 		(int)skb_network_header_len(skb) +
1787*4882a593Smuzhiyun 		tcp_hdrlen(skb);
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
1790*4882a593Smuzhiyun 	switch (gso_type) {
1791*4882a593Smuzhiyun 	case SKB_GSO_TCPV4:
1792*4882a593Smuzhiyun 		/* TCP v4, zero out the IP length and IPv4 checksum fields
1793*4882a593Smuzhiyun 		 * as required by the offloading doc
1794*4882a593Smuzhiyun 		 */
1795*4882a593Smuzhiyun 		ip_hdr(skb)->tot_len = 0;
1796*4882a593Smuzhiyun 		ip_hdr(skb)->check = 0;
1797*4882a593Smuzhiyun 		is_ipv4 = true;
1798*4882a593Smuzhiyun 		break;
1799*4882a593Smuzhiyun 	case SKB_GSO_TCPV6:
1800*4882a593Smuzhiyun 		/* TCP v6, zero out the payload length */
1801*4882a593Smuzhiyun 		ipv6_hdr(skb)->payload_len = 0;
1802*4882a593Smuzhiyun 		is_ipv4 = false;
1803*4882a593Smuzhiyun 		break;
1804*4882a593Smuzhiyun 	default:
1805*4882a593Smuzhiyun 		/* other than TCPv4 or TCPv6 types are not supported for TSO.
1806*4882a593Smuzhiyun 		 * It is also illegal for both to be set simultaneously
1807*4882a593Smuzhiyun 		 */
1808*4882a593Smuzhiyun 		return -EINVAL;
1809*4882a593Smuzhiyun 	}
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1812*4882a593Smuzhiyun 		return -EINVAL;
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	/* tcp header length and skb network header length are fixed for all
1815*4882a593Smuzhiyun 	 * packet's descriptors - read then once here
1816*4882a593Smuzhiyun 	 */
1817*4882a593Smuzhiyun 	tcp_hdr_len = tcp_hdrlen(skb);
1818*4882a593Smuzhiyun 	skb_net_hdr_len = skb_network_header_len(skb);
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	_hdr_desc = &vring->va[i].tx.legacy;
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
1823*4882a593Smuzhiyun 	if (unlikely(dma_mapping_error(dev, pa))) {
1824*4882a593Smuzhiyun 		wil_err(wil, "TSO: Skb head DMA map error\n");
1825*4882a593Smuzhiyun 		goto err_exit;
1826*4882a593Smuzhiyun 	}
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)hdr_desc, pa,
1829*4882a593Smuzhiyun 				  hdrlen, vring_index);
1830*4882a593Smuzhiyun 	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
1831*4882a593Smuzhiyun 				      tcp_hdr_len, skb_net_hdr_len);
1832*4882a593Smuzhiyun 	wil_tx_last_desc(hdr_desc);
1833*4882a593Smuzhiyun 
1834*4882a593Smuzhiyun 	vring->ctx[i].mapped_as = wil_mapped_as_single;
1835*4882a593Smuzhiyun 	hdr_ctx = &vring->ctx[i];
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun 	descs_used++;
1838*4882a593Smuzhiyun 	headlen = skb_headlen(skb) - hdrlen;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	for (f = headlen ? -1 : 0; f < nr_frags; f++)  {
1841*4882a593Smuzhiyun 		if (headlen) {
1842*4882a593Smuzhiyun 			len = headlen;
1843*4882a593Smuzhiyun 			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
1844*4882a593Smuzhiyun 				     len);
1845*4882a593Smuzhiyun 		} else {
1846*4882a593Smuzhiyun 			frag = &skb_shinfo(skb)->frags[f];
1847*4882a593Smuzhiyun 			len = skb_frag_size(frag);
1848*4882a593Smuzhiyun 			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
1849*4882a593Smuzhiyun 		}
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 		while (len) {
1852*4882a593Smuzhiyun 			wil_dbg_txrx(wil,
1853*4882a593Smuzhiyun 				     "TSO: len %d, rem_data %d, descs_used %d\n",
1854*4882a593Smuzhiyun 				     len, rem_data, descs_used);
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 			if (descs_used == avail)  {
1857*4882a593Smuzhiyun 				wil_err_ratelimited(wil, "TSO: ring overflow\n");
1858*4882a593Smuzhiyun 				rc = -ENOMEM;
1859*4882a593Smuzhiyun 				goto mem_error;
1860*4882a593Smuzhiyun 			}
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 			lenmss = min_t(int, rem_data, len);
1863*4882a593Smuzhiyun 			i = (swhead + descs_used) % vring->size;
1864*4882a593Smuzhiyun 			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 			if (!headlen) {
1867*4882a593Smuzhiyun 				pa = skb_frag_dma_map(dev, frag,
1868*4882a593Smuzhiyun 						      skb_frag_size(frag) - len,
1869*4882a593Smuzhiyun 						      lenmss, DMA_TO_DEVICE);
1870*4882a593Smuzhiyun 				vring->ctx[i].mapped_as = wil_mapped_as_page;
1871*4882a593Smuzhiyun 			} else {
1872*4882a593Smuzhiyun 				pa = dma_map_single(dev,
1873*4882a593Smuzhiyun 						    skb->data +
1874*4882a593Smuzhiyun 						    skb_headlen(skb) - headlen,
1875*4882a593Smuzhiyun 						    lenmss,
1876*4882a593Smuzhiyun 						    DMA_TO_DEVICE);
1877*4882a593Smuzhiyun 				vring->ctx[i].mapped_as = wil_mapped_as_single;
1878*4882a593Smuzhiyun 				headlen -= lenmss;
1879*4882a593Smuzhiyun 			}
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 			if (unlikely(dma_mapping_error(dev, pa))) {
1882*4882a593Smuzhiyun 				wil_err(wil, "TSO: DMA map page error\n");
1883*4882a593Smuzhiyun 				goto mem_error;
1884*4882a593Smuzhiyun 			}
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 			_desc = &vring->va[i].tx.legacy;
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 			if (!_first_desc) {
1889*4882a593Smuzhiyun 				_first_desc = _desc;
1890*4882a593Smuzhiyun 				first_ctx = &vring->ctx[i];
1891*4882a593Smuzhiyun 				d = first_desc;
1892*4882a593Smuzhiyun 			} else {
1893*4882a593Smuzhiyun 				d = &desc_mem;
1894*4882a593Smuzhiyun 			}
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 			wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
1897*4882a593Smuzhiyun 						  pa, lenmss, vring_index);
1898*4882a593Smuzhiyun 			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
1899*4882a593Smuzhiyun 						      is_ipv4, tcp_hdr_len,
1900*4882a593Smuzhiyun 						      skb_net_hdr_len);
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 			/* use tso_type_first only once */
1903*4882a593Smuzhiyun 			desc_tso_type = wil_tso_type_mid;
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 			descs_used++;  /* desc used so far */
1906*4882a593Smuzhiyun 			sg_desc_cnt++; /* desc used for this segment */
1907*4882a593Smuzhiyun 			len -= lenmss;
1908*4882a593Smuzhiyun 			rem_data -= lenmss;
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 			wil_dbg_txrx(wil,
1911*4882a593Smuzhiyun 				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
1912*4882a593Smuzhiyun 				     len, rem_data, descs_used, sg_desc_cnt);
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 			/* Close the segment if reached mss size or last frag*/
1915*4882a593Smuzhiyun 			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
1916*4882a593Smuzhiyun 				if (hdr_compensation_need) {
1917*4882a593Smuzhiyun 					/* first segment include hdr desc for
1918*4882a593Smuzhiyun 					 * release
1919*4882a593Smuzhiyun 					 */
1920*4882a593Smuzhiyun 					hdr_ctx->nr_frags = sg_desc_cnt;
1921*4882a593Smuzhiyun 					wil_tx_desc_set_nr_frags(first_desc,
1922*4882a593Smuzhiyun 								 sg_desc_cnt +
1923*4882a593Smuzhiyun 								 1);
1924*4882a593Smuzhiyun 					hdr_compensation_need = false;
1925*4882a593Smuzhiyun 				} else {
1926*4882a593Smuzhiyun 					wil_tx_desc_set_nr_frags(first_desc,
1927*4882a593Smuzhiyun 								 sg_desc_cnt);
1928*4882a593Smuzhiyun 				}
1929*4882a593Smuzhiyun 				first_ctx->nr_frags = sg_desc_cnt - 1;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 				wil_tx_last_desc(d);
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 				/* first descriptor may also be the last
1934*4882a593Smuzhiyun 				 * for this mss - make sure not to copy
1935*4882a593Smuzhiyun 				 * it twice
1936*4882a593Smuzhiyun 				 */
1937*4882a593Smuzhiyun 				if (first_desc != d)
1938*4882a593Smuzhiyun 					*_first_desc = *first_desc;
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun 				/*last descriptor will be copied at the end
1941*4882a593Smuzhiyun 				 * of this TS processing
1942*4882a593Smuzhiyun 				 */
1943*4882a593Smuzhiyun 				if (f < nr_frags - 1 || len > 0)
1944*4882a593Smuzhiyun 					*_desc = *d;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 				rem_data = mss;
1947*4882a593Smuzhiyun 				_first_desc = NULL;
1948*4882a593Smuzhiyun 				sg_desc_cnt = 0;
1949*4882a593Smuzhiyun 			} else if (first_desc != d) /* update mid descriptor */
1950*4882a593Smuzhiyun 					*_desc = *d;
1951*4882a593Smuzhiyun 		}
1952*4882a593Smuzhiyun 	}
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	if (!_desc)
1955*4882a593Smuzhiyun 		goto mem_error;
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 	/* first descriptor may also be the last.
1958*4882a593Smuzhiyun 	 * in this case d pointer is invalid
1959*4882a593Smuzhiyun 	 */
1960*4882a593Smuzhiyun 	if (_first_desc == _desc)
1961*4882a593Smuzhiyun 		d = first_desc;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 	/* Last data descriptor */
1964*4882a593Smuzhiyun 	wil_set_tx_desc_last_tso(d);
1965*4882a593Smuzhiyun 	*_desc = *d;
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	/* Fill the total number of descriptors in first desc (hdr)*/
1968*4882a593Smuzhiyun 	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
1969*4882a593Smuzhiyun 	*_hdr_desc = *hdr_desc;
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	/* hold reference to skb
1972*4882a593Smuzhiyun 	 * to prevent skb release before accounting
1973*4882a593Smuzhiyun 	 * in case of immediate "tx done"
1974*4882a593Smuzhiyun 	 */
1975*4882a593Smuzhiyun 	vring->ctx[i].skb = skb_get(skb);
1976*4882a593Smuzhiyun 
1977*4882a593Smuzhiyun 	/* performance monitoring */
1978*4882a593Smuzhiyun 	used = wil_ring_used_tx(vring);
1979*4882a593Smuzhiyun 	if (wil_val_in_range(wil->ring_idle_trsh,
1980*4882a593Smuzhiyun 			     used, used + descs_used)) {
1981*4882a593Smuzhiyun 		txdata->idle += get_cycles() - txdata->last_idle;
1982*4882a593Smuzhiyun 		wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
1983*4882a593Smuzhiyun 			     vring_index, used, used + descs_used);
1984*4882a593Smuzhiyun 	}
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	/* Make sure to advance the head only after descriptor update is done.
1987*4882a593Smuzhiyun 	 * This will prevent a race condition where the completion thread
1988*4882a593Smuzhiyun 	 * will see the DU bit set from previous run and will handle the
1989*4882a593Smuzhiyun 	 * skb before it was completed.
1990*4882a593Smuzhiyun 	 */
1991*4882a593Smuzhiyun 	wmb();
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	/* advance swhead */
1994*4882a593Smuzhiyun 	wil_ring_advance_head(vring, descs_used);
1995*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun 	/* make sure all writes to descriptors (shared memory) are done before
1998*4882a593Smuzhiyun 	 * committing them to HW
1999*4882a593Smuzhiyun 	 */
2000*4882a593Smuzhiyun 	wmb();
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 	if (wil->tx_latency)
2003*4882a593Smuzhiyun 		*(ktime_t *)&skb->cb = ktime_get();
2004*4882a593Smuzhiyun 	else
2005*4882a593Smuzhiyun 		memset(skb->cb, 0, sizeof(ktime_t));
2006*4882a593Smuzhiyun 
2007*4882a593Smuzhiyun 	wil_w(wil, vring->hwtail, vring->swhead);
2008*4882a593Smuzhiyun 	return 0;
2009*4882a593Smuzhiyun 
2010*4882a593Smuzhiyun mem_error:
2011*4882a593Smuzhiyun 	while (descs_used > 0) {
2012*4882a593Smuzhiyun 		struct wil_ctx *ctx;
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun 		i = (swhead + descs_used - 1) % vring->size;
2015*4882a593Smuzhiyun 		d = (struct vring_tx_desc *)&vring->va[i].tx.legacy;
2016*4882a593Smuzhiyun 		_desc = &vring->va[i].tx.legacy;
2017*4882a593Smuzhiyun 		*d = *_desc;
2018*4882a593Smuzhiyun 		_desc->dma.status = TX_DMA_STATUS_DU;
2019*4882a593Smuzhiyun 		ctx = &vring->ctx[i];
2020*4882a593Smuzhiyun 		wil_txdesc_unmap(dev, (union wil_tx_desc *)d, ctx);
2021*4882a593Smuzhiyun 		memset(ctx, 0, sizeof(*ctx));
2022*4882a593Smuzhiyun 		descs_used--;
2023*4882a593Smuzhiyun 	}
2024*4882a593Smuzhiyun err_exit:
2025*4882a593Smuzhiyun 	return rc;
2026*4882a593Smuzhiyun }
2027*4882a593Smuzhiyun 
__wil_tx_ring(struct wil6210_priv * wil,struct wil6210_vif * vif,struct wil_ring * ring,struct sk_buff * skb)2028*4882a593Smuzhiyun static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
2029*4882a593Smuzhiyun 			 struct wil_ring *ring, struct sk_buff *skb)
2030*4882a593Smuzhiyun {
2031*4882a593Smuzhiyun 	struct device *dev = wil_to_dev(wil);
2032*4882a593Smuzhiyun 	struct vring_tx_desc dd, *d = &dd;
2033*4882a593Smuzhiyun 	volatile struct vring_tx_desc *_d;
2034*4882a593Smuzhiyun 	u32 swhead = ring->swhead;
2035*4882a593Smuzhiyun 	int avail = wil_ring_avail_tx(ring);
2036*4882a593Smuzhiyun 	int nr_frags = skb_shinfo(skb)->nr_frags;
2037*4882a593Smuzhiyun 	uint f = 0;
2038*4882a593Smuzhiyun 	int ring_index = ring - wil->ring_tx;
2039*4882a593Smuzhiyun 	struct wil_ring_tx_data  *txdata = &wil->ring_tx_data[ring_index];
2040*4882a593Smuzhiyun 	uint i = swhead;
2041*4882a593Smuzhiyun 	dma_addr_t pa;
2042*4882a593Smuzhiyun 	int used;
2043*4882a593Smuzhiyun 	bool mcast = (ring_index == vif->bcast_ring);
2044*4882a593Smuzhiyun 	uint len = skb_headlen(skb);
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "tx_ring: %d bytes to ring %d, nr_frags %d\n",
2047*4882a593Smuzhiyun 		     skb->len, ring_index, nr_frags);
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun 	if (unlikely(!txdata->enabled))
2050*4882a593Smuzhiyun 		return -EINVAL;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	if (unlikely(avail < 1 + nr_frags)) {
2053*4882a593Smuzhiyun 		wil_err_ratelimited(wil,
2054*4882a593Smuzhiyun 				    "Tx ring[%2d] full. No space for %d fragments\n",
2055*4882a593Smuzhiyun 				    ring_index, 1 + nr_frags);
2056*4882a593Smuzhiyun 		return -ENOMEM;
2057*4882a593Smuzhiyun 	}
2058*4882a593Smuzhiyun 	_d = &ring->va[i].tx.legacy;
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 	pa = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "Tx[%2d] skb %d bytes 0x%p -> %pad\n", ring_index,
2063*4882a593Smuzhiyun 		     skb_headlen(skb), skb->data, &pa);
2064*4882a593Smuzhiyun 	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
2065*4882a593Smuzhiyun 			  skb->data, skb_headlen(skb), false);
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun 	if (unlikely(dma_mapping_error(dev, pa)))
2068*4882a593Smuzhiyun 		return -EINVAL;
2069*4882a593Smuzhiyun 	ring->ctx[i].mapped_as = wil_mapped_as_single;
2070*4882a593Smuzhiyun 	/* 1-st segment */
2071*4882a593Smuzhiyun 	wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d, pa, len,
2072*4882a593Smuzhiyun 				   ring_index);
2073*4882a593Smuzhiyun 	if (unlikely(mcast)) {
2074*4882a593Smuzhiyun 		d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
2075*4882a593Smuzhiyun 		if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
2076*4882a593Smuzhiyun 			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
2077*4882a593Smuzhiyun 	}
2078*4882a593Smuzhiyun 	/* Process TCP/UDP checksum offloading */
2079*4882a593Smuzhiyun 	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
2080*4882a593Smuzhiyun 		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
2081*4882a593Smuzhiyun 			ring_index);
2082*4882a593Smuzhiyun 		goto dma_error;
2083*4882a593Smuzhiyun 	}
2084*4882a593Smuzhiyun 
2085*4882a593Smuzhiyun 	ring->ctx[i].nr_frags = nr_frags;
2086*4882a593Smuzhiyun 	wil_tx_desc_set_nr_frags(d, nr_frags + 1);
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun 	/* middle segments */
2089*4882a593Smuzhiyun 	for (; f < nr_frags; f++) {
2090*4882a593Smuzhiyun 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2091*4882a593Smuzhiyun 		int len = skb_frag_size(frag);
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 		*_d = *d;
2094*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
2095*4882a593Smuzhiyun 		wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
2096*4882a593Smuzhiyun 				  (const void *)d, sizeof(*d), false);
2097*4882a593Smuzhiyun 		i = (swhead + f + 1) % ring->size;
2098*4882a593Smuzhiyun 		_d = &ring->va[i].tx.legacy;
2099*4882a593Smuzhiyun 		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
2100*4882a593Smuzhiyun 				      DMA_TO_DEVICE);
2101*4882a593Smuzhiyun 		if (unlikely(dma_mapping_error(dev, pa))) {
2102*4882a593Smuzhiyun 			wil_err(wil, "Tx[%2d] failed to map fragment\n",
2103*4882a593Smuzhiyun 				ring_index);
2104*4882a593Smuzhiyun 			goto dma_error;
2105*4882a593Smuzhiyun 		}
2106*4882a593Smuzhiyun 		ring->ctx[i].mapped_as = wil_mapped_as_page;
2107*4882a593Smuzhiyun 		wil->txrx_ops.tx_desc_map((union wil_tx_desc *)d,
2108*4882a593Smuzhiyun 					   pa, len, ring_index);
2109*4882a593Smuzhiyun 		/* no need to check return code -
2110*4882a593Smuzhiyun 		 * if it succeeded for 1-st descriptor,
2111*4882a593Smuzhiyun 		 * it will succeed here too
2112*4882a593Smuzhiyun 		 */
2113*4882a593Smuzhiyun 		wil_tx_desc_offload_setup(d, skb);
2114*4882a593Smuzhiyun 	}
2115*4882a593Smuzhiyun 	/* for the last seg only */
2116*4882a593Smuzhiyun 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
2117*4882a593Smuzhiyun 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
2118*4882a593Smuzhiyun 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
2119*4882a593Smuzhiyun 	*_d = *d;
2120*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "Tx[%2d] desc[%4d]\n", ring_index, i);
2121*4882a593Smuzhiyun 	wil_hex_dump_txrx("TxD ", DUMP_PREFIX_NONE, 32, 4,
2122*4882a593Smuzhiyun 			  (const void *)d, sizeof(*d), false);
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun 	/* hold reference to skb
2125*4882a593Smuzhiyun 	 * to prevent skb release before accounting
2126*4882a593Smuzhiyun 	 * in case of immediate "tx done"
2127*4882a593Smuzhiyun 	 */
2128*4882a593Smuzhiyun 	ring->ctx[i].skb = skb_get(skb);
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	/* performance monitoring */
2131*4882a593Smuzhiyun 	used = wil_ring_used_tx(ring);
2132*4882a593Smuzhiyun 	if (wil_val_in_range(wil->ring_idle_trsh,
2133*4882a593Smuzhiyun 			     used, used + nr_frags + 1)) {
2134*4882a593Smuzhiyun 		txdata->idle += get_cycles() - txdata->last_idle;
2135*4882a593Smuzhiyun 		wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
2136*4882a593Smuzhiyun 			     ring_index, used, used + nr_frags + 1);
2137*4882a593Smuzhiyun 	}
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	/* Make sure to advance the head only after descriptor update is done.
2140*4882a593Smuzhiyun 	 * This will prevent a race condition where the completion thread
2141*4882a593Smuzhiyun 	 * will see the DU bit set from previous run and will handle the
2142*4882a593Smuzhiyun 	 * skb before it was completed.
2143*4882a593Smuzhiyun 	 */
2144*4882a593Smuzhiyun 	wmb();
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun 	/* advance swhead */
2147*4882a593Smuzhiyun 	wil_ring_advance_head(ring, nr_frags + 1);
2148*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", ring_index, swhead,
2149*4882a593Smuzhiyun 		     ring->swhead);
2150*4882a593Smuzhiyun 	trace_wil6210_tx(ring_index, swhead, skb->len, nr_frags);
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 	/* make sure all writes to descriptors (shared memory) are done before
2153*4882a593Smuzhiyun 	 * committing them to HW
2154*4882a593Smuzhiyun 	 */
2155*4882a593Smuzhiyun 	wmb();
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun 	if (wil->tx_latency)
2158*4882a593Smuzhiyun 		*(ktime_t *)&skb->cb = ktime_get();
2159*4882a593Smuzhiyun 	else
2160*4882a593Smuzhiyun 		memset(skb->cb, 0, sizeof(ktime_t));
2161*4882a593Smuzhiyun 
2162*4882a593Smuzhiyun 	wil_w(wil, ring->hwtail, ring->swhead);
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun 	return 0;
2165*4882a593Smuzhiyun  dma_error:
2166*4882a593Smuzhiyun 	/* unmap what we have mapped */
2167*4882a593Smuzhiyun 	nr_frags = f + 1; /* frags mapped + one for skb head */
2168*4882a593Smuzhiyun 	for (f = 0; f < nr_frags; f++) {
2169*4882a593Smuzhiyun 		struct wil_ctx *ctx;
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun 		i = (swhead + f) % ring->size;
2172*4882a593Smuzhiyun 		ctx = &ring->ctx[i];
2173*4882a593Smuzhiyun 		_d = &ring->va[i].tx.legacy;
2174*4882a593Smuzhiyun 		*d = *_d;
2175*4882a593Smuzhiyun 		_d->dma.status = TX_DMA_STATUS_DU;
2176*4882a593Smuzhiyun 		wil->txrx_ops.tx_desc_unmap(dev,
2177*4882a593Smuzhiyun 					    (union wil_tx_desc *)d,
2178*4882a593Smuzhiyun 					    ctx);
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 		memset(ctx, 0, sizeof(*ctx));
2181*4882a593Smuzhiyun 	}
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	return -EINVAL;
2184*4882a593Smuzhiyun }
2185*4882a593Smuzhiyun 
wil_tx_ring(struct wil6210_priv * wil,struct wil6210_vif * vif,struct wil_ring * ring,struct sk_buff * skb)2186*4882a593Smuzhiyun static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
2187*4882a593Smuzhiyun 		       struct wil_ring *ring, struct sk_buff *skb)
2188*4882a593Smuzhiyun {
2189*4882a593Smuzhiyun 	int ring_index = ring - wil->ring_tx;
2190*4882a593Smuzhiyun 	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ring_index];
2191*4882a593Smuzhiyun 	int rc;
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun 	spin_lock(&txdata->lock);
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 	if (test_bit(wil_status_suspending, wil->status) ||
2196*4882a593Smuzhiyun 	    test_bit(wil_status_suspended, wil->status) ||
2197*4882a593Smuzhiyun 	    test_bit(wil_status_resuming, wil->status)) {
2198*4882a593Smuzhiyun 		wil_dbg_txrx(wil,
2199*4882a593Smuzhiyun 			     "suspend/resume in progress. drop packet\n");
2200*4882a593Smuzhiyun 		spin_unlock(&txdata->lock);
2201*4882a593Smuzhiyun 		return -EINVAL;
2202*4882a593Smuzhiyun 	}
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun 	rc = (skb_is_gso(skb) ? wil->txrx_ops.tx_ring_tso : __wil_tx_ring)
2205*4882a593Smuzhiyun 	     (wil, vif, ring, skb);
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	spin_unlock(&txdata->lock);
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	return rc;
2210*4882a593Smuzhiyun }
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun /* Check status of tx vrings and stop/wake net queues if needed
2213*4882a593Smuzhiyun  * It will start/stop net queues of a specific VIF net_device.
2214*4882a593Smuzhiyun  *
2215*4882a593Smuzhiyun  * This function does one of two checks:
2216*4882a593Smuzhiyun  * In case check_stop is true, will check if net queues need to be stopped. If
2217*4882a593Smuzhiyun  * the conditions for stopping are met, netif_tx_stop_all_queues() is called.
2218*4882a593Smuzhiyun  * In case check_stop is false, will check if net queues need to be waked. If
2219*4882a593Smuzhiyun  * the conditions for waking are met, netif_tx_wake_all_queues() is called.
2220*4882a593Smuzhiyun  * vring is the vring which is currently being modified by either adding
2221*4882a593Smuzhiyun  * descriptors (tx) into it or removing descriptors (tx complete) from it. Can
2222*4882a593Smuzhiyun  * be null when irrelevant (e.g. connect/disconnect events).
2223*4882a593Smuzhiyun  *
2224*4882a593Smuzhiyun  * The implementation is to stop net queues if modified vring has low
2225*4882a593Smuzhiyun  * descriptor availability. Wake if all vrings are not in low descriptor
2226*4882a593Smuzhiyun  * availability and modified vring has high descriptor availability.
2227*4882a593Smuzhiyun  */
__wil_update_net_queues(struct wil6210_priv * wil,struct wil6210_vif * vif,struct wil_ring * ring,bool check_stop)2228*4882a593Smuzhiyun static inline void __wil_update_net_queues(struct wil6210_priv *wil,
2229*4882a593Smuzhiyun 					   struct wil6210_vif *vif,
2230*4882a593Smuzhiyun 					   struct wil_ring *ring,
2231*4882a593Smuzhiyun 					   bool check_stop)
2232*4882a593Smuzhiyun {
2233*4882a593Smuzhiyun 	int i;
2234*4882a593Smuzhiyun 	int min_ring_id = wil_get_min_tx_ring_id(wil);
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 	if (unlikely(!vif))
2237*4882a593Smuzhiyun 		return;
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun 	if (ring)
2240*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "vring %d, mid %d, check_stop=%d, stopped=%d",
2241*4882a593Smuzhiyun 			     (int)(ring - wil->ring_tx), vif->mid, check_stop,
2242*4882a593Smuzhiyun 			     vif->net_queue_stopped);
2243*4882a593Smuzhiyun 	else
2244*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d",
2245*4882a593Smuzhiyun 			     check_stop, vif->mid, vif->net_queue_stopped);
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	if (ring && drop_if_ring_full)
2248*4882a593Smuzhiyun 		/* no need to stop/wake net queues */
2249*4882a593Smuzhiyun 		return;
2250*4882a593Smuzhiyun 
2251*4882a593Smuzhiyun 	if (check_stop == vif->net_queue_stopped)
2252*4882a593Smuzhiyun 		/* net queues already in desired state */
2253*4882a593Smuzhiyun 		return;
2254*4882a593Smuzhiyun 
2255*4882a593Smuzhiyun 	if (check_stop) {
2256*4882a593Smuzhiyun 		if (!ring || unlikely(wil_ring_avail_low(ring))) {
2257*4882a593Smuzhiyun 			/* not enough room in the vring */
2258*4882a593Smuzhiyun 			netif_tx_stop_all_queues(vif_to_ndev(vif));
2259*4882a593Smuzhiyun 			vif->net_queue_stopped = true;
2260*4882a593Smuzhiyun 			wil_dbg_txrx(wil, "netif_tx_stop called\n");
2261*4882a593Smuzhiyun 		}
2262*4882a593Smuzhiyun 		return;
2263*4882a593Smuzhiyun 	}
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun 	/* Do not wake the queues in suspend flow */
2266*4882a593Smuzhiyun 	if (test_bit(wil_status_suspending, wil->status) ||
2267*4882a593Smuzhiyun 	    test_bit(wil_status_suspended, wil->status))
2268*4882a593Smuzhiyun 		return;
2269*4882a593Smuzhiyun 
2270*4882a593Smuzhiyun 	/* check wake */
2271*4882a593Smuzhiyun 	for (i = min_ring_id; i < WIL6210_MAX_TX_RINGS; i++) {
2272*4882a593Smuzhiyun 		struct wil_ring *cur_ring = &wil->ring_tx[i];
2273*4882a593Smuzhiyun 		struct wil_ring_tx_data  *txdata = &wil->ring_tx_data[i];
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 		if (txdata->mid != vif->mid || !cur_ring->va ||
2276*4882a593Smuzhiyun 		    !txdata->enabled || cur_ring == ring)
2277*4882a593Smuzhiyun 			continue;
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 		if (wil_ring_avail_low(cur_ring)) {
2280*4882a593Smuzhiyun 			wil_dbg_txrx(wil, "ring %d full, can't wake\n",
2281*4882a593Smuzhiyun 				     (int)(cur_ring - wil->ring_tx));
2282*4882a593Smuzhiyun 			return;
2283*4882a593Smuzhiyun 		}
2284*4882a593Smuzhiyun 	}
2285*4882a593Smuzhiyun 
2286*4882a593Smuzhiyun 	if (!ring || wil_ring_avail_high(ring)) {
2287*4882a593Smuzhiyun 		/* enough room in the ring */
2288*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "calling netif_tx_wake\n");
2289*4882a593Smuzhiyun 		netif_tx_wake_all_queues(vif_to_ndev(vif));
2290*4882a593Smuzhiyun 		vif->net_queue_stopped = false;
2291*4882a593Smuzhiyun 	}
2292*4882a593Smuzhiyun }
2293*4882a593Smuzhiyun 
wil_update_net_queues(struct wil6210_priv * wil,struct wil6210_vif * vif,struct wil_ring * ring,bool check_stop)2294*4882a593Smuzhiyun void wil_update_net_queues(struct wil6210_priv *wil, struct wil6210_vif *vif,
2295*4882a593Smuzhiyun 			   struct wil_ring *ring, bool check_stop)
2296*4882a593Smuzhiyun {
2297*4882a593Smuzhiyun 	spin_lock(&wil->net_queue_lock);
2298*4882a593Smuzhiyun 	__wil_update_net_queues(wil, vif, ring, check_stop);
2299*4882a593Smuzhiyun 	spin_unlock(&wil->net_queue_lock);
2300*4882a593Smuzhiyun }
2301*4882a593Smuzhiyun 
wil_update_net_queues_bh(struct wil6210_priv * wil,struct wil6210_vif * vif,struct wil_ring * ring,bool check_stop)2302*4882a593Smuzhiyun void wil_update_net_queues_bh(struct wil6210_priv *wil, struct wil6210_vif *vif,
2303*4882a593Smuzhiyun 			      struct wil_ring *ring, bool check_stop)
2304*4882a593Smuzhiyun {
2305*4882a593Smuzhiyun 	spin_lock_bh(&wil->net_queue_lock);
2306*4882a593Smuzhiyun 	__wil_update_net_queues(wil, vif, ring, check_stop);
2307*4882a593Smuzhiyun 	spin_unlock_bh(&wil->net_queue_lock);
2308*4882a593Smuzhiyun }
2309*4882a593Smuzhiyun 
wil_start_xmit(struct sk_buff * skb,struct net_device * ndev)2310*4882a593Smuzhiyun netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2311*4882a593Smuzhiyun {
2312*4882a593Smuzhiyun 	struct wil6210_vif *vif = ndev_to_vif(ndev);
2313*4882a593Smuzhiyun 	struct wil6210_priv *wil = vif_to_wil(vif);
2314*4882a593Smuzhiyun 	const u8 *da = wil_skb_get_da(skb);
2315*4882a593Smuzhiyun 	bool bcast = is_multicast_ether_addr(da);
2316*4882a593Smuzhiyun 	struct wil_ring *ring;
2317*4882a593Smuzhiyun 	static bool pr_once_fw;
2318*4882a593Smuzhiyun 	int rc;
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "start_xmit\n");
2321*4882a593Smuzhiyun 	if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
2322*4882a593Smuzhiyun 		if (!pr_once_fw) {
2323*4882a593Smuzhiyun 			wil_err(wil, "FW not ready\n");
2324*4882a593Smuzhiyun 			pr_once_fw = true;
2325*4882a593Smuzhiyun 		}
2326*4882a593Smuzhiyun 		goto drop;
2327*4882a593Smuzhiyun 	}
2328*4882a593Smuzhiyun 	if (unlikely(!test_bit(wil_vif_fwconnected, vif->status))) {
2329*4882a593Smuzhiyun 		wil_dbg_ratelimited(wil,
2330*4882a593Smuzhiyun 				    "VIF not connected, packet dropped\n");
2331*4882a593Smuzhiyun 		goto drop;
2332*4882a593Smuzhiyun 	}
2333*4882a593Smuzhiyun 	if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_MONITOR)) {
2334*4882a593Smuzhiyun 		wil_err(wil, "Xmit in monitor mode not supported\n");
2335*4882a593Smuzhiyun 		goto drop;
2336*4882a593Smuzhiyun 	}
2337*4882a593Smuzhiyun 	pr_once_fw = false;
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	/* find vring */
2340*4882a593Smuzhiyun 	if (vif->wdev.iftype == NL80211_IFTYPE_STATION && !vif->pbss) {
2341*4882a593Smuzhiyun 		/* in STA mode (ESS), all to same VRING (to AP) */
2342*4882a593Smuzhiyun 		ring = wil_find_tx_ring_sta(wil, vif, skb);
2343*4882a593Smuzhiyun 	} else if (bcast) {
2344*4882a593Smuzhiyun 		if (vif->pbss || wil_check_multicast_to_unicast(wil, skb))
2345*4882a593Smuzhiyun 			/* in pbss, no bcast VRING - duplicate skb in
2346*4882a593Smuzhiyun 			 * all stations VRINGs
2347*4882a593Smuzhiyun 			 */
2348*4882a593Smuzhiyun 			ring = wil_find_tx_bcast_2(wil, vif, skb);
2349*4882a593Smuzhiyun 		else if (vif->wdev.iftype == NL80211_IFTYPE_AP)
2350*4882a593Smuzhiyun 			/* AP has a dedicated bcast VRING */
2351*4882a593Smuzhiyun 			ring = wil_find_tx_bcast_1(wil, vif, skb);
2352*4882a593Smuzhiyun 		else
2353*4882a593Smuzhiyun 			/* unexpected combination, fallback to duplicating
2354*4882a593Smuzhiyun 			 * the skb in all stations VRINGs
2355*4882a593Smuzhiyun 			 */
2356*4882a593Smuzhiyun 			ring = wil_find_tx_bcast_2(wil, vif, skb);
2357*4882a593Smuzhiyun 	} else {
2358*4882a593Smuzhiyun 		/* unicast, find specific VRING by dest. address */
2359*4882a593Smuzhiyun 		ring = wil_find_tx_ucast(wil, vif, skb);
2360*4882a593Smuzhiyun 	}
2361*4882a593Smuzhiyun 	if (unlikely(!ring)) {
2362*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da);
2363*4882a593Smuzhiyun 		goto drop;
2364*4882a593Smuzhiyun 	}
2365*4882a593Smuzhiyun 	/* set up vring entry */
2366*4882a593Smuzhiyun 	rc = wil_tx_ring(wil, vif, ring, skb);
2367*4882a593Smuzhiyun 
2368*4882a593Smuzhiyun 	switch (rc) {
2369*4882a593Smuzhiyun 	case 0:
2370*4882a593Smuzhiyun 		/* shall we stop net queues? */
2371*4882a593Smuzhiyun 		wil_update_net_queues_bh(wil, vif, ring, true);
2372*4882a593Smuzhiyun 		/* statistics will be updated on the tx_complete */
2373*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
2374*4882a593Smuzhiyun 		return NETDEV_TX_OK;
2375*4882a593Smuzhiyun 	case -ENOMEM:
2376*4882a593Smuzhiyun 		if (drop_if_ring_full)
2377*4882a593Smuzhiyun 			goto drop;
2378*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
2379*4882a593Smuzhiyun 	default:
2380*4882a593Smuzhiyun 		break; /* goto drop; */
2381*4882a593Smuzhiyun 	}
2382*4882a593Smuzhiyun  drop:
2383*4882a593Smuzhiyun 	ndev->stats.tx_dropped++;
2384*4882a593Smuzhiyun 	dev_kfree_skb_any(skb);
2385*4882a593Smuzhiyun 
2386*4882a593Smuzhiyun 	return NET_XMIT_DROP;
2387*4882a593Smuzhiyun }
2388*4882a593Smuzhiyun 
wil_tx_latency_calc(struct wil6210_priv * wil,struct sk_buff * skb,struct wil_sta_info * sta)2389*4882a593Smuzhiyun void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
2390*4882a593Smuzhiyun 			 struct wil_sta_info *sta)
2391*4882a593Smuzhiyun {
2392*4882a593Smuzhiyun 	int skb_time_us;
2393*4882a593Smuzhiyun 	int bin;
2394*4882a593Smuzhiyun 
2395*4882a593Smuzhiyun 	if (!wil->tx_latency)
2396*4882a593Smuzhiyun 		return;
2397*4882a593Smuzhiyun 
2398*4882a593Smuzhiyun 	if (ktime_to_ms(*(ktime_t *)&skb->cb) == 0)
2399*4882a593Smuzhiyun 		return;
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun 	skb_time_us = ktime_us_delta(ktime_get(), *(ktime_t *)&skb->cb);
2402*4882a593Smuzhiyun 	bin = skb_time_us / wil->tx_latency_res;
2403*4882a593Smuzhiyun 	bin = min_t(int, bin, WIL_NUM_LATENCY_BINS - 1);
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "skb time %dus => bin %d\n", skb_time_us, bin);
2406*4882a593Smuzhiyun 	sta->tx_latency_bins[bin]++;
2407*4882a593Smuzhiyun 	sta->stats.tx_latency_total_us += skb_time_us;
2408*4882a593Smuzhiyun 	if (skb_time_us < sta->stats.tx_latency_min_us)
2409*4882a593Smuzhiyun 		sta->stats.tx_latency_min_us = skb_time_us;
2410*4882a593Smuzhiyun 	if (skb_time_us > sta->stats.tx_latency_max_us)
2411*4882a593Smuzhiyun 		sta->stats.tx_latency_max_us = skb_time_us;
2412*4882a593Smuzhiyun }
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun /* Clean up transmitted skb's from the Tx VRING
2415*4882a593Smuzhiyun  *
2416*4882a593Smuzhiyun  * Return number of descriptors cleared
2417*4882a593Smuzhiyun  *
2418*4882a593Smuzhiyun  * Safe to call from IRQ
2419*4882a593Smuzhiyun  */
wil_tx_complete(struct wil6210_vif * vif,int ringid)2420*4882a593Smuzhiyun int wil_tx_complete(struct wil6210_vif *vif, int ringid)
2421*4882a593Smuzhiyun {
2422*4882a593Smuzhiyun 	struct wil6210_priv *wil = vif_to_wil(vif);
2423*4882a593Smuzhiyun 	struct net_device *ndev = vif_to_ndev(vif);
2424*4882a593Smuzhiyun 	struct device *dev = wil_to_dev(wil);
2425*4882a593Smuzhiyun 	struct wil_ring *vring = &wil->ring_tx[ringid];
2426*4882a593Smuzhiyun 	struct wil_ring_tx_data *txdata = &wil->ring_tx_data[ringid];
2427*4882a593Smuzhiyun 	int done = 0;
2428*4882a593Smuzhiyun 	int cid = wil->ring2cid_tid[ringid][0];
2429*4882a593Smuzhiyun 	struct wil_net_stats *stats = NULL;
2430*4882a593Smuzhiyun 	volatile struct vring_tx_desc *_d;
2431*4882a593Smuzhiyun 	int used_before_complete;
2432*4882a593Smuzhiyun 	int used_new;
2433*4882a593Smuzhiyun 
2434*4882a593Smuzhiyun 	if (unlikely(!vring->va)) {
2435*4882a593Smuzhiyun 		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
2436*4882a593Smuzhiyun 		return 0;
2437*4882a593Smuzhiyun 	}
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun 	if (unlikely(!txdata->enabled)) {
2440*4882a593Smuzhiyun 		wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
2441*4882a593Smuzhiyun 		return 0;
2442*4882a593Smuzhiyun 	}
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 	wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
2445*4882a593Smuzhiyun 
2446*4882a593Smuzhiyun 	used_before_complete = wil_ring_used_tx(vring);
2447*4882a593Smuzhiyun 
2448*4882a593Smuzhiyun 	if (cid < wil->max_assoc_sta)
2449*4882a593Smuzhiyun 		stats = &wil->sta[cid].stats;
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	while (!wil_ring_is_empty(vring)) {
2452*4882a593Smuzhiyun 		int new_swtail;
2453*4882a593Smuzhiyun 		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
2454*4882a593Smuzhiyun 		/* For the fragmented skb, HW will set DU bit only for the
2455*4882a593Smuzhiyun 		 * last fragment. look for it.
2456*4882a593Smuzhiyun 		 * In TSO the first DU will include hdr desc
2457*4882a593Smuzhiyun 		 */
2458*4882a593Smuzhiyun 		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
2459*4882a593Smuzhiyun 		/* TODO: check we are not past head */
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun 		_d = &vring->va[lf].tx.legacy;
2462*4882a593Smuzhiyun 		if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU)))
2463*4882a593Smuzhiyun 			break;
2464*4882a593Smuzhiyun 
2465*4882a593Smuzhiyun 		new_swtail = (lf + 1) % vring->size;
2466*4882a593Smuzhiyun 		while (vring->swtail != new_swtail) {
2467*4882a593Smuzhiyun 			struct vring_tx_desc dd, *d = &dd;
2468*4882a593Smuzhiyun 			u16 dmalen;
2469*4882a593Smuzhiyun 			struct sk_buff *skb;
2470*4882a593Smuzhiyun 
2471*4882a593Smuzhiyun 			ctx = &vring->ctx[vring->swtail];
2472*4882a593Smuzhiyun 			skb = ctx->skb;
2473*4882a593Smuzhiyun 			_d = &vring->va[vring->swtail].tx.legacy;
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 			*d = *_d;
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 			dmalen = le16_to_cpu(d->dma.length);
2478*4882a593Smuzhiyun 			trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
2479*4882a593Smuzhiyun 					      d->dma.error);
2480*4882a593Smuzhiyun 			wil_dbg_txrx(wil,
2481*4882a593Smuzhiyun 				     "TxC[%2d][%3d] : %d bytes, status 0x%02x err 0x%02x\n",
2482*4882a593Smuzhiyun 				     ringid, vring->swtail, dmalen,
2483*4882a593Smuzhiyun 				     d->dma.status, d->dma.error);
2484*4882a593Smuzhiyun 			wil_hex_dump_txrx("TxCD ", DUMP_PREFIX_NONE, 32, 4,
2485*4882a593Smuzhiyun 					  (const void *)d, sizeof(*d), false);
2486*4882a593Smuzhiyun 
2487*4882a593Smuzhiyun 			wil->txrx_ops.tx_desc_unmap(dev,
2488*4882a593Smuzhiyun 						    (union wil_tx_desc *)d,
2489*4882a593Smuzhiyun 						    ctx);
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 			if (skb) {
2492*4882a593Smuzhiyun 				if (likely(d->dma.error == 0)) {
2493*4882a593Smuzhiyun 					ndev->stats.tx_packets++;
2494*4882a593Smuzhiyun 					ndev->stats.tx_bytes += skb->len;
2495*4882a593Smuzhiyun 					if (stats) {
2496*4882a593Smuzhiyun 						stats->tx_packets++;
2497*4882a593Smuzhiyun 						stats->tx_bytes += skb->len;
2498*4882a593Smuzhiyun 
2499*4882a593Smuzhiyun 						wil_tx_latency_calc(wil, skb,
2500*4882a593Smuzhiyun 							&wil->sta[cid]);
2501*4882a593Smuzhiyun 					}
2502*4882a593Smuzhiyun 				} else {
2503*4882a593Smuzhiyun 					ndev->stats.tx_errors++;
2504*4882a593Smuzhiyun 					if (stats)
2505*4882a593Smuzhiyun 						stats->tx_errors++;
2506*4882a593Smuzhiyun 				}
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 				if (skb->protocol == cpu_to_be16(ETH_P_PAE))
2509*4882a593Smuzhiyun 					wil_tx_complete_handle_eapol(vif, skb);
2510*4882a593Smuzhiyun 
2511*4882a593Smuzhiyun 				wil_consume_skb(skb, d->dma.error == 0);
2512*4882a593Smuzhiyun 			}
2513*4882a593Smuzhiyun 			memset(ctx, 0, sizeof(*ctx));
2514*4882a593Smuzhiyun 			/* Make sure the ctx is zeroed before updating the tail
2515*4882a593Smuzhiyun 			 * to prevent a case where wil_tx_ring will see
2516*4882a593Smuzhiyun 			 * this descriptor as used and handle it before ctx zero
2517*4882a593Smuzhiyun 			 * is completed.
2518*4882a593Smuzhiyun 			 */
2519*4882a593Smuzhiyun 			wmb();
2520*4882a593Smuzhiyun 			/* There is no need to touch HW descriptor:
2521*4882a593Smuzhiyun 			 * - ststus bit TX_DMA_STATUS_DU is set by design,
2522*4882a593Smuzhiyun 			 *   so hardware will not try to process this desc.,
2523*4882a593Smuzhiyun 			 * - rest of descriptor will be initialized on Tx.
2524*4882a593Smuzhiyun 			 */
2525*4882a593Smuzhiyun 			vring->swtail = wil_ring_next_tail(vring);
2526*4882a593Smuzhiyun 			done++;
2527*4882a593Smuzhiyun 		}
2528*4882a593Smuzhiyun 	}
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 	/* performance monitoring */
2531*4882a593Smuzhiyun 	used_new = wil_ring_used_tx(vring);
2532*4882a593Smuzhiyun 	if (wil_val_in_range(wil->ring_idle_trsh,
2533*4882a593Smuzhiyun 			     used_new, used_before_complete)) {
2534*4882a593Smuzhiyun 		wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n",
2535*4882a593Smuzhiyun 			     ringid, used_before_complete, used_new);
2536*4882a593Smuzhiyun 		txdata->last_idle = get_cycles();
2537*4882a593Smuzhiyun 	}
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun 	/* shall we wake net queues? */
2540*4882a593Smuzhiyun 	if (done)
2541*4882a593Smuzhiyun 		wil_update_net_queues(wil, vif, vring, false);
2542*4882a593Smuzhiyun 
2543*4882a593Smuzhiyun 	return done;
2544*4882a593Smuzhiyun }
2545*4882a593Smuzhiyun 
wil_tx_init(struct wil6210_priv * wil)2546*4882a593Smuzhiyun static inline int wil_tx_init(struct wil6210_priv *wil)
2547*4882a593Smuzhiyun {
2548*4882a593Smuzhiyun 	return 0;
2549*4882a593Smuzhiyun }
2550*4882a593Smuzhiyun 
wil_tx_fini(struct wil6210_priv * wil)2551*4882a593Smuzhiyun static inline void wil_tx_fini(struct wil6210_priv *wil) {}
2552*4882a593Smuzhiyun 
wil_get_reorder_params(struct wil6210_priv * wil,struct sk_buff * skb,int * tid,int * cid,int * mid,u16 * seq,int * mcast,int * retry)2553*4882a593Smuzhiyun static void wil_get_reorder_params(struct wil6210_priv *wil,
2554*4882a593Smuzhiyun 				   struct sk_buff *skb, int *tid, int *cid,
2555*4882a593Smuzhiyun 				   int *mid, u16 *seq, int *mcast, int *retry)
2556*4882a593Smuzhiyun {
2557*4882a593Smuzhiyun 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
2558*4882a593Smuzhiyun 
2559*4882a593Smuzhiyun 	*tid = wil_rxdesc_tid(d);
2560*4882a593Smuzhiyun 	*cid = wil_skb_get_cid(skb);
2561*4882a593Smuzhiyun 	*mid = wil_rxdesc_mid(d);
2562*4882a593Smuzhiyun 	*seq = wil_rxdesc_seq(d);
2563*4882a593Smuzhiyun 	*mcast = wil_rxdesc_mcast(d);
2564*4882a593Smuzhiyun 	*retry = wil_rxdesc_retry(d);
2565*4882a593Smuzhiyun }
2566*4882a593Smuzhiyun 
wil_init_txrx_ops_legacy_dma(struct wil6210_priv * wil)2567*4882a593Smuzhiyun void wil_init_txrx_ops_legacy_dma(struct wil6210_priv *wil)
2568*4882a593Smuzhiyun {
2569*4882a593Smuzhiyun 	wil->txrx_ops.configure_interrupt_moderation =
2570*4882a593Smuzhiyun 		wil_configure_interrupt_moderation;
2571*4882a593Smuzhiyun 	/* TX ops */
2572*4882a593Smuzhiyun 	wil->txrx_ops.tx_desc_map = wil_tx_desc_map;
2573*4882a593Smuzhiyun 	wil->txrx_ops.tx_desc_unmap = wil_txdesc_unmap;
2574*4882a593Smuzhiyun 	wil->txrx_ops.tx_ring_tso =  __wil_tx_vring_tso;
2575*4882a593Smuzhiyun 	wil->txrx_ops.ring_init_tx = wil_vring_init_tx;
2576*4882a593Smuzhiyun 	wil->txrx_ops.ring_fini_tx = wil_vring_free;
2577*4882a593Smuzhiyun 	wil->txrx_ops.ring_init_bcast = wil_vring_init_bcast;
2578*4882a593Smuzhiyun 	wil->txrx_ops.tx_init = wil_tx_init;
2579*4882a593Smuzhiyun 	wil->txrx_ops.tx_fini = wil_tx_fini;
2580*4882a593Smuzhiyun 	wil->txrx_ops.tx_ring_modify = wil_tx_vring_modify;
2581*4882a593Smuzhiyun 	/* RX ops */
2582*4882a593Smuzhiyun 	wil->txrx_ops.rx_init = wil_rx_init;
2583*4882a593Smuzhiyun 	wil->txrx_ops.wmi_addba_rx_resp = wmi_addba_rx_resp;
2584*4882a593Smuzhiyun 	wil->txrx_ops.get_reorder_params = wil_get_reorder_params;
2585*4882a593Smuzhiyun 	wil->txrx_ops.get_netif_rx_params =
2586*4882a593Smuzhiyun 		wil_get_netif_rx_params;
2587*4882a593Smuzhiyun 	wil->txrx_ops.rx_crypto_check = wil_rx_crypto_check;
2588*4882a593Smuzhiyun 	wil->txrx_ops.rx_error_check = wil_rx_error_check;
2589*4882a593Smuzhiyun 	wil->txrx_ops.is_rx_idle = wil_is_rx_idle;
2590*4882a593Smuzhiyun 	wil->txrx_ops.rx_fini = wil_rx_fini;
2591*4882a593Smuzhiyun }
2592