// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

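/* If a refill pass was capped by ATH10K_HTT_MAX_NUM_REFILL and a deficit
 * remains, the retry timer is re-armed with this shorter interval (see
 * ath10k_htt_rx_msdu_buff_replenish()).
 */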
#define HTT_RX_RING_REFILL_RESCHED_MS 5

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

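/* The _32/_64 accessor pairs above abstract the target's physical address
 * width. The matching variant is assumed to be selected once at HTT setup
 * time (the selection itself is not part of this section).
 */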
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure the ring is always at least half
	 * empty. This guarantees there can be no replenishment overruns.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

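	/* The alloc index lives in DMA-coherent memory shared with the
	 * target: filling resumes at the index stored there and the new
	 * value is published at the end (after the write barrier below).
	 */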
	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
		ret = -ENOMEM;
		goto fail;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before the available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting
	 * as a bridge with the ath10k wlan interface. This ended up with
	 * very poor performance once the host system's CPU was overwhelmed
	 * with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact tasklets are
	 * processed in FIFO order. This means actual RX processing can
	 * starve out refilling. If there are not enough buffers on the RX
	 * ring FW will not report RX until it is refilled with enough
	 * buffers. This automatically balances load with respect to CPU
	 * power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return;

	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

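/* Chain @frag_list onto @skb_head as its skb frag_list. Per sk_buff
 * accounting rules data_len counts only the chained fragment bytes,
 * while len is the linear headlen plus data_len, hence the increment.
 */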
static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
					       struct sk_buff *frag_list,
					       unsigned int frag_len)
{
	skb_shinfo(skb_head)->frag_list = frag_list;
	skb_head->data_len = frag_len;
	skb_head->len += skb_head->data_len;
}

static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
					     struct sk_buff *msdu,
					     struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	u32 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le32_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le32_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

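/* Same as the _32 variant above, except the in-order MSDU descriptors
 * (htt_rx_in_ord_msdu_desc_ext) carry 64-bit physical addresses.
 */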
static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
				  struct sk_buff *msdu,
				  struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
	struct ath10k *ar = htt->ar;
	u64 paddr;
	struct sk_buff *frag_buf;
	struct sk_buff *prev_frag_buf;
	u8 last_frag;
	struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
	struct htt_rx_desc *rxd;
	int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);

	rxd = (void *)msdu->data;
	trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

	skb_put(msdu, sizeof(struct htt_rx_desc));
	skb_pull(msdu, sizeof(struct htt_rx_desc));
	skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
	amsdu_len -= msdu->len;

	last_frag = ind_desc->reserved;
	if (last_frag) {
		if (amsdu_len) {
			ath10k_warn(ar, "invalid amsdu len %u, left %d",
				    __le16_to_cpu(ind_desc->msdu_len),
				    amsdu_len);
		}
		return 0;
	}

	ind_desc++;
	paddr = __le64_to_cpu(ind_desc->msdu_paddr);
	frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
	if (!frag_buf) {
		ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
		return -ENOENT;
	}

	skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
	ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);

	amsdu_len -= frag_buf->len;
	prev_frag_buf = frag_buf;
	last_frag = ind_desc->reserved;
	while (!last_frag) {
		ind_desc++;
		paddr = __le64_to_cpu(ind_desc->msdu_paddr);
		frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!frag_buf) {
			ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
				    paddr);
			prev_frag_buf->next = NULL;
			return -ENOENT;
		}

		skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
		last_frag = ind_desc->reserved;
		amsdu_len -= frag_buf->len;

		prev_frag_buf->next = frag_buf;
		prev_frag_buf = frag_buf;
	}

	if (amsdu_len) {
		ath10k_warn(ar, "invalid amsdu len %u, left %d",
			    __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
	}

	*msdu_desc = ind_desc;

	prev_frag_buf->next = NULL;
	return 0;
}

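/* In-order rx: the in-order indication (struct htt_rx_in_ord_ind) lists
 * the physical addresses of ring buffers in the order the target consumed
 * them; each paddr is resolved back to its skb through the hash table
 * that was populated at refill time.
 */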
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count, ret;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		if (!is_offload && ar->monitor_arvif) {
			ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
								&msdu_desc);
			if (ret) {
				__skb_queue_purge(list);
				return ret;
			}
			__skb_queue_tail(list, msdu);
			msdu_desc++;
			continue;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

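/* The helpers below report per-algorithm IV/MIC/ICV lengths so the rx
 * path can strip crypto overhead from frames the hardware has already
 * decrypted.
 */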
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

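/* TKIP's Michael MIC is always 8 bytes, appended to the MSDU payload */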
#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

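/* 802.3-style A-MSDU subframe header as defined by IEEE 802.11: DA and
 * SA followed by a big-endian length field.
 */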
struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

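/* In VHT-SIG-A1 a Group ID of 0 or 63 denotes an SU transmission;
 * any other value means MU-MIMO.
 */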
#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;
	u32 stbc, nsts_su;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* The band is needed to look up the legacy rate index.
		 * Since the band can't be undefined, check that freq is
		 * non-zero before using it.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

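		/* For SU frames Nss is derived from the VHT-SIG-A1 NSTS
		 * subfield. With STBC each spatial stream occupies two
		 * space-time streams, which is why the decode below differs
		 * when the STBC bit is set (mapping as implemented against
		 * the descriptor layout; see TODO above).
		 */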
		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

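/* Resolve the channel to report for a frame, in decreasing order of
 * confidence: scan channel, current rx channel, the peer's channel,
 * the channel of the vdev given by vdev_id, any active channel context,
 * and finally the target's operating channel.
 */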
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

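	/* A per-chain pri20 RSSI of 0x80 appears to act as a "no sample"
	 * sentinel, so such chains are left out of the reported chain mask.
	 */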
	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
1245*4882a593Smuzhiyun ath10k_htt_rx_h_mactime(ar, status, rxd);
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun /* set ampdu last segment flag */
1248*4882a593Smuzhiyun status->flag |= RX_FLAG_AMPDU_IS_LAST;
1249*4882a593Smuzhiyun ar->ampdu_reference++;
1250*4882a593Smuzhiyun }
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun
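/* 802.11 TID to WMM access category names, following the standard 802.1D
 * user priority to EDCA AC mapping (0/3 -> BE, 1/2 -> BK, 4/5 -> VI,
 * 6/7 -> VO). Used only for debug logging below.
 */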
static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	if (!(ar->filter_flags & FIF_FCSFAIL) &&
	    status->flag & RX_FLAG_FAILED_FCS_CRC) {
		ar->stats.rx_crc_err_drop++;
		dev_kfree_skb_any(skb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
		   "mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

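/* Length of the decapped native-wifi header. Older firmware pads the nwifi
 * header out to a 4 byte boundary; firmware advertising the
 * NO_NWIFI_DECAP_4ADDR_PADDING feature delivers the unpadded length.
 */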
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted,
					const u8 first_hdr[64])
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;
	bool msdu_limit_err;
	int bytes_aligned = ar->hw_params.decap_align_bytes;
	u8 *qos;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* Some hardware (e.g. QCA99x0 variants) limits the number of MSDUs it
	 * deaggregates from an A-MSDU so that unwanted MSDU-deaggregation is
	 * avoided for error packets. If the limit is exceeded, the hw sends
	 * all remaining MSDUs as a single last MSDU with the msdu limit error
	 * bit set.
	 */
	msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);

	/* If an MSDU limit error occurred then don't warn: a partial raw MSDU
	 * without the first MSDU is expected in that case and is handled
	 * further below.
	 */
	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* Push original 802.11 header */
	if (unlikely(msdu_limit_err)) {
		hdr = (struct ieee80211_hdr *)first_hdr;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		if (ieee80211_is_data_qos(hdr->frame_control)) {
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}

		if (crypto_len)
			memcpy(skb_push(msdu, crypto_len),
			       (void *)hdr + round_up(hdr_len, bytes_aligned),
			       crypto_len);

		memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
	}

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header, even if the MSDU is
	 * part of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

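/* Locate the rfc1042/llc header inside the undecapped copy of the frame
 * kept in rx_hdr_status: for the first MSDU skip the (aligned) 802.11
 * header and crypto params, and for A-MSDU subframes additionally skip
 * the A-MSDU subframe header.
 */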
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted, first_hdr);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

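/* Map the hw checksum attention bits onto the skb checksum state. The hw
 * result is trusted (CHECKSUM_UNNECESSARY) only when the frame is IPv4 or
 * IPv6 carrying TCP or UDP and both the IP header and TCP/UDP checksums
 * verified; anything else falls back to software verification
 * (CHECKSUM_NONE).
 */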
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}

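/* Extract the 48 bit packet number from the crypto header following the
 * 802.11 header. The byte offsets below follow the CCMP header layout
 * (PN0, PN1, reserved, key-id/ExtIV, PN2..PN5), i.e. a sketch of the
 * reassembly:
 *
 *   pn48 = PN5 << 40 | PN4 << 32 | PN3 << 24 | PN2 << 16 | PN1 << 8 | PN0
 *
 * Other cipher types simply return 0 here.
 */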
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
				  u16 offset,
				  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;

	hdr = (struct ieee80211_hdr *)(skb->data + offset);
	ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);

	if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
		pn = ehdr[0];
		pn |= (u64)ehdr[1] << 8;
		pn |= (u64)ehdr[4] << 16;
		pn |= (u64)ehdr[5] << 24;
		pn |= (u64)ehdr[6] << 32;
		pn |= (u64)ehdr[7] << 40;
	}
	return pn;
}

static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
						 struct sk_buff *skb,
						 u16 offset)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + offset);
	return !is_multicast_ether_addr(hdr->addr1);
}

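/* Per-fragment replay protection: all fragments of one MPDU must share the
 * same sequence number and carry strictly incrementing PNs. Fragment 0
 * seeds the per-TID state; any later fragment with a mismatching sequence
 * number or a PN other than last_pn + 1 is rejected.
 */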
static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
					  struct sk_buff *skb,
					  u16 peer_id,
					  u16 offset,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ath10k_peer *peer;
	union htt_rx_pn_t *last_pn, new_pn = {0};
	struct ieee80211_hdr *hdr;
	bool more_frags;
	u8 tid, frag_number;
	u32 seq;

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
		return false;
	}

	hdr = (struct ieee80211_hdr *)(skb->data + offset);
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	else
		tid = ATH10K_TXRX_NON_QOS_TID;

	last_pn = &peer->frag_tids_last_pn[tid];
	new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
	more_frags = ieee80211_has_morefrags(hdr->frame_control);
	frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
	seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;

	if (frag_number == 0) {
		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else {
		if (seq != peer->frag_tids_seq[tid])
			return false;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			return false;

		last_pn->pn48 = new_pn.pn48;
	}

	return true;
}

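/* Process one received MPDU (a list of MSDUs): derive the encryption type
 * and error bits from the rx descriptors, translate them into mac80211
 * rx_status flags, drop invalid fragments, fix up the checksum offload
 * state and undecap every MSDU back into a regular 802.11 frame.
 */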
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err,
				 u16 peer_id,
				 bool frag)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu, *temp;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[64];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;
	bool frag_pn_check = true, multicast_check = true;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		if (frag && !fill_crypt_header && is_decrypted &&
		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
			frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
								      msdu,
								      peer_id,
								      0,
								      enctype);

		if (frag)
			multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
									       msdu,
									       0);

		if (!frag_pn_check || !multicast_check) {
			/* Discard the fragment with invalid PN or multicast DA
			 */
			temp = msdu->prev;
			__skb_unlink(msdu, amsdu);
			dev_kfree_skb_any(msdu);
			msdu = temp;
			frag_pn_check = true;
			multicast_check = true;
			continue;
		}

		ath10k_htt_rx_h_csum_offload(msdu);

		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_MMIC_STRIPPED;

		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);

		if (frag && !fill_crypt_header &&
		    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
			status->flag &= ~RX_FLAG_IV_STRIPPED &
					~RX_FLAG_MMIC_STRIPPED;
	}
}

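/* Queue all MSDUs of an A-MSDU towards mac80211. Only the last subframe
 * clears RX_FLAG_AMSDU_MORE, and every subframe after the first is marked
 * RX_FLAG_ALLOW_SAME_PN since the subframes share their MPDU's PN and
 * would otherwise trip mac80211's replay detection.
 */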
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

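/* Coalesce an MSDU that the hw chained across several rx buffers into the
 * first skb: grow the first buffer's tailroom to the total length, then
 * copy each remaining fragment in and free it. Returns -1 (leaving the
 * list intact) if the tailroom expansion fails under memory pressure.
 */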
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: Might be able to optimize this by using skb_try_coalesce or a
	 * similar method to decrease the copying, or maybe get mac80211 to
	 * provide a way to just receive a list of skbs?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long *drop_cnt,
				    unsigned long *unchain_cnt)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle the simple case of
	 * raw msdu chaining. If decapping is other than raw the chaining may
	 * be more complex and this isn't handled by the current code. Don't
	 * even try re-constructing such frames - it'll be pretty much
	 * garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}

static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
					 struct sk_buff_head *amsdu)
{
	u8 *subframe_hdr;
	struct sk_buff *first;
	bool is_first, is_last;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	enum htt_rx_mpdu_encrypt_type enctype;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	first = skb_peek(amsdu);

	rxd = (void *)first->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Return in case of non-aggregated msdu */
	if (is_first && is_last)
		return true;

	/* First msdu flag is not set for the first msdu of the list */
	if (!is_first)
		return false;

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
		       crypto_len;

	/* Validate that the amsdu has a proper first subframe. A single
	 * msdu can wrongly be received as an amsdu when the (unauthenticated)
	 * amsdu flag of the QoS header gets flipped in non-SPP A-MSDUs; in
	 * such cases the first subframe carries an llc/snap header in place
	 * of a valid DA. Return false if the DA matches the rfc1042 pattern.
	 */
	if (ether_addr_equal(subframe_hdr, rfc1042_header))
		return false;

	return true;
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}

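/* Pop one MPDU off the rx ring and run it through the rx pipeline. The
 * processing order, roughly:
 *
 *   ath10k_htt_rx_amsdu_pop()   <-- dequeue MSDUs from the ring
 *   ath10k_htt_rx_h_ppdu()      <-- per-PPDU status (rate, channel, signal)
 *   ath10k_htt_rx_h_unchain()   <-- coalesce buffer-chained MSDUs
 *   ath10k_htt_rx_h_filter()    <-- drop frames we can't report sanely
 *   ath10k_htt_rx_h_mpdu()      <-- crypto/error flags, undecap
 *   ath10k_htt_rx_h_enqueue()   <-- hand MSDUs to the rx_msdus_q
 */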
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long drop_cnt = 0;
	unsigned long unchain_cnt = 0;
	unsigned long drop_cnt_filter = 0;
	unsigned long msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* a positive return value indicates chained msdus */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
			     false);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}

static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
					  union htt_rx_pn_t *pn,
					  int pn_len_bits)
{
	switch (pn_len_bits) {
	case 48:
		pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
			   ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
		break;
	case 24:
		pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
		break;
	}
}

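/* Compare two 48 bit PNs; returns true when new_pn does not advance past
 * old_pn, i.e. the frame is a replay. For example, a new PN equal to the
 * previously accepted one compares as a replay, while old_pn + 1 passes.
 */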
static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
				   union htt_rx_pn_t *old_pn)
{
	return ((new_pn->pn48 & 0xffffffffffffULL) <=
		(old_pn->pn48 & 0xffffffffffffULL));
}

static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
					     struct ath10k_peer *peer,
					     struct htt_rx_indication_hl *rx)
{
	bool last_pn_valid, pn_invalid = false;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	union htt_rx_pn_t *last_pn;
	u32 rx_desc_info, tid;
	int num_mpdu_ranges;

	lockdep_assert_held(&ar->data_lock);

	if (!peer)
		return false;

	if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
		return false;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
		return false;

	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	if (sec_type != HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC)
		return false;

	if (last_pn_valid)
		pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
	else
		peer->tids_last_pn_valid[tid] = true;

	if (!pn_invalid)
		last_pn->pn48 = new_pn.pn48;

	return pn_invalid;
}

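/* High latency (SDIO/USB) rx indication handler. The complete frame follows
 * the HTT headers in the same message, so the frame is delivered to mac80211
 * directly from here. Returns true if the caller still owns the skb and must
 * free it, false if it has been consumed.
 */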
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
					 struct htt_rx_indication_hl *rx,
					 struct sk_buff *skb,
					 enum htt_rx_pn_check_type check_pn_type,
					 enum htt_rx_tkip_demic_type tkip_mic_type)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct fw_rx_desc_hl *fw_desc;
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_status *rx_status;
	u16 peer_id;
	u8 rx_desc_len;
	int num_mpdu_ranges;
	size_t tot_hdr_len;
	struct ieee80211_channel *ch;
	bool pn_invalid, qos, first_msdu;
	u32 tid, rx_desc_info;

	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	spin_unlock_bh(&ar->data_lock);
	if (!peer && peer_id != HTT_INVALID_PEERID)
		ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);

	if (!peer)
		return true;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
	fw_desc = &rx->fw_desc;
	rx_desc_len = fw_desc->len;

	if (fw_desc->u.bits.discard) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
		goto err;
	}

	/* I have not yet seen any case where num_mpdu_ranges > 1.
	 * qcacld does not seem to handle that case either, so we introduce
	 * the same limitation here as well.
	 */
	if (num_mpdu_ranges > 1)
		ath10k_warn(ar,
			    "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
			    num_mpdu_ranges);

	if (mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_OK &&
	    mpdu_ranges->mpdu_range_status !=
	    HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
			   mpdu_ranges->mpdu_range_status);
		goto err;
	}

	rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
		sec_index = HTT_TXRX_SEC_MCAST;
	else
		sec_index = HTT_TXRX_SEC_UCAST;

	sec_type = peer->rx_pn[sec_index].sec_type;
	first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;

	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
		spin_lock_bh(&ar->data_lock);
		pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
		spin_unlock_bh(&ar->data_lock);

		if (pn_invalid)
			goto err;
	}

	/* Strip off all headers before the MAC header before delivery to
	 * mac80211
	 */
	tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
		      sizeof(rx->ppdu) + sizeof(rx->prefix) +
		      sizeof(rx->fw_desc) +
		      sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;

	skb_pull(skb, tot_hdr_len);

	hdr = (struct ieee80211_hdr *)skb->data;
	qos = ieee80211_is_data_qos(hdr->frame_control);

	rx_status = IEEE80211_SKB_RXCB(skb);
	memset(rx_status, 0, sizeof(*rx_status));

	if (rx->ppdu.combined_rssi == 0) {
		/* SDIO firmware does not provide signal */
		rx_status->signal = 0;
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
	} else {
		rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			rx->ppdu.combined_rssi;
		rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
	}

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (ch) {
		rx_status->band = ch->band;
		rx_status->freq = ch->center_freq;
	}
	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
	else
		rx_status->flag |= RX_FLAG_AMSDU_MORE;

	/* Not entirely sure about this, but all frames from the chipset have
	 * the protected flag set even though they have already been decrypted.
	 * Unmasking this flag is necessary in order for mac80211 not to drop
	 * the frame.
	 * TODO: Verify this is always the case or find out a way to check
	 * if there has been hw decryption.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		rx_status->flag |= RX_FLAG_DECRYPTED |
				   RX_FLAG_IV_STRIPPED |
				   RX_FLAG_MMIC_STRIPPED;

		if (tid < IEEE80211_NUM_TIDS &&
		    first_msdu &&
		    check_pn_type == HTT_RX_PN_CHECK &&
		    (sec_type == HTT_SECURITY_AES_CCMP ||
		     sec_type == HTT_SECURITY_TKIP ||
		     sec_type == HTT_SECURITY_TKIP_NOMIC)) {
			u8 offset, *ivp, i;
			s8 keyidx = 0;
			__le64 pn48 = cpu_to_le64(new_pn.pn48);

			hdr = (struct ieee80211_hdr *)skb->data;
			offset = ieee80211_hdrlen(hdr->frame_control);
			hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			rx_status->flag &= ~RX_FLAG_IV_STRIPPED;

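			/* Shift the 802.11 header forward to make room for a
			 * reconstructed CCMP/TKIP IV between the header and
			 * the payload.
			 */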
			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
				skb->data, offset);
			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
			ivp = skb->data + offset;
			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
			/* Ext IV */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;

			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] &&
				    peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
					keyidx = peer->keys[i]->keyidx;
			}

			/* Key ID */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;

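			/* CCMP header layout: PN0 PN1 <rsvd> <keyid> PN2 PN3 PN4 PN5.
			 * TKIP header layout: TSC1 <WEPSeed> TSC0 <keyid> TSC2 TSC3 TSC4 TSC5.
			 */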
			if (sec_type == HTT_SECURITY_AES_CCMP) {
				rx_status->flag |= RX_FLAG_MIC_STRIPPED;
				/* pn 0, pn 1 */
				memcpy(skb->data + offset, &pn48, 2);
				/* pn 2, pn 3, pn 4, pn 5 */
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			} else {
				rx_status->flag |= RX_FLAG_ICV_STRIPPED;
				/* TSC 0 */
				memcpy(skb->data + offset + 2, &pn48, 1);
				/* TSC 1 */
				memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
				/* TSC 2, TSC 3, TSC 4, TSC 5 */
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			}
		}
	}

	if (tkip_mic_type == HTT_RX_TKIP_MIC)
		rx_status->flag &= ~(RX_FLAG_IV_STRIPPED |
				     RX_FLAG_MMIC_STRIPPED);

	if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (!qos && tid < IEEE80211_NUM_TIDS) {
		u8 offset;
		__le16 qos_ctrl = 0;

		hdr = (struct ieee80211_hdr *)skb->data;
		offset = ieee80211_hdrlen(hdr->frame_control);

		hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
		memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
		skb_push(skb, IEEE80211_QOS_CTL_LEN);
		qos_ctrl = cpu_to_le16(tid);
		memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
	}

	if (ar->napi.dev)
		ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
	else
		ieee80211_rx_ni(ar->hw, skb);

	/* We have delivered the skb to the upper layers (mac80211) so we
	 * must not free it.
	 */
	return false;
err:
	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}

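/* The fragment decap helpers below strip the cipher IV (and the trailing
 * MIC/ICV) from a received fragment in place: everything in front of the IV
 * (head_len of HTT metadata plus the 802.11 header) is shifted over the IV
 * and the tail is trimmed.
 */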
static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
					       u16 head_len,
					       u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
	return 0;
}

static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
						 u16 head_len,
						 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for TKIP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_TKIP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
	return 0;
}

static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
					 u16 head_len,
					 u16 hdr_len)
{
	u8 *ivp, *orig_hdr;

	orig_hdr = skb->data;
	ivp = orig_hdr + hdr_len + head_len;

	/* the ExtIV bit is always set to 1 for CCMP */
	if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
		return -EINVAL;

	skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
	memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
	return 0;
}

static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
					u16 head_len,
					u16 hdr_len)
{
	u8 *orig_hdr;

	orig_hdr = skb->data;

	memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
		orig_hdr, head_len + hdr_len);
	skb_pull(skb, IEEE80211_WEP_IV_LEN);
	skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
	return 0;
}

static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
					      struct htt_rx_fragment_indication *rx,
					      struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
	enum htt_txrx_sec_cast_type sec_index;
	struct htt_rx_indication_hl *rx_hl;
	enum htt_security_types sec_type;
	u32 tid, frag, seq, rx_desc_info;
	union htt_rx_pn_t new_pn = {0};
	struct htt_hl_rx_desc *rx_desc;
	u16 peer_id, sc, hdr_space;
	union htt_rx_pn_t *last_pn;
	struct ieee80211_hdr *hdr;
	int ret, num_mpdu_ranges;
	struct ath10k_peer *peer;
	struct htt_resp *resp;
	size_t tot_hdr_len;

	resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
	skb_trim(skb, skb->len - FCS_LEN);

	peer_id = __le16_to_cpu(rx->peer_id);
	rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
		goto err;
	}

	num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);

	tot_hdr_len = sizeof(struct htt_resp_hdr) +
		      sizeof(rx_hl->hdr) +
		      sizeof(rx_hl->ppdu) +
		      sizeof(rx_hl->prefix) +
		      sizeof(rx_hl->fw_desc) +
		      sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;

	tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
	rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
	rx_desc_info = __le32_to_cpu(rx_desc->info);

	hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);

	if (is_multicast_ether_addr(hdr->addr1)) {
		/* Discard the fragment with multicast DA */
		goto err;
	}

	if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	if (ieee80211_has_retry(hdr->frame_control))
		goto err;

	hdr_space = ieee80211_hdrlen(hdr->frame_control);
	sc = __le16_to_cpu(hdr->seq_ctrl);
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
	frag = sc & IEEE80211_SCTL_FRAG;

	sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
		    HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
	sec_type = peer->rx_pn[sec_index].sec_type;
	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);

	switch (sec_type) {
	case HTT_SECURITY_TKIP:
		tkip_mic = HTT_RX_TKIP_MIC;
		ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
							    tot_hdr_len +
							    rx_hl->fw_desc.len,
							    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_TKIP_NOMIC:
		ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
							  tot_hdr_len +
							  rx_hl->fw_desc.len,
							  hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_AES_CCMP:
		ret = ath10k_htt_rx_frag_ccmp_decap(skb,
						    tot_hdr_len + rx_hl->fw_desc.len,
						    hdr_space);
		if (ret)
			goto err;
		break;
	case HTT_SECURITY_WEP128:
	case HTT_SECURITY_WEP104:
	case HTT_SECURITY_WEP40:
		ret = ath10k_htt_rx_frag_wep_decap(skb,
						   tot_hdr_len + rx_hl->fw_desc.len,
						   hdr_space);
		if (ret)
			goto err;
		break;
	default:
		break;
	}

	resp = (struct htt_resp *)(skb->data);

	if (sec_type != HTT_SECURITY_AES_CCMP &&
	    sec_type != HTT_SECURITY_TKIP &&
	    sec_type != HTT_SECURITY_TKIP_NOMIC) {
		spin_unlock_bh(&ar->data_lock);
		return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
						    HTT_RX_NON_PN_CHECK,
						    HTT_RX_NON_TKIP_MIC);
	}

	last_pn = &peer->frag_tids_last_pn[tid];

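	/* For the first fragment, validate the PN against the per-TID replay
	 * state and record the sequence number and PN. Subsequent CCMP
	 * fragments must belong to the same sequence number and carry a PN
	 * that increments by exactly one per fragment.
	 */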
	if (frag == 0) {
		if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
			goto err;

		last_pn->pn48 = new_pn.pn48;
		peer->frag_tids_seq[tid] = seq;
	} else if (sec_type == HTT_SECURITY_AES_CCMP) {
		if (seq != peer->frag_tids_seq[tid])
			goto err;

		if (new_pn.pn48 != last_pn->pn48 + 1)
			goto err;

		last_pn->pn48 = new_pn.pn48;
		last_pn = &peer->tids_last_pn[tid];
		last_pn->pn48 = new_pn.pn48;
	}

	spin_unlock_bh(&ar->data_lock);

	return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
					    HTT_RX_NON_PN_CHECK, tkip_mic);

err:
	spin_unlock_bh(&ar->data_lock);

	/* Tell the caller that it must free the skb since we have not
	 * consumed it
	 */
	return true;
}

static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
					 struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;
	u16 peer_id;
	u8 tid;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
					     num_mpdu_ranges);
}

static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id, *msdus;
	bool rssi_enabled = false;
	u8 msdu_count = 0, num_airtime_records, tid;
	int i, htt_pad = 0;
	struct htt_data_tx_compl_ppdu_dur *ppdu_info;
	struct ath10k_peer *peer;
	u16 ppdu_info_offset = 0, peer_id;
	u32 tx_duration;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	msdu_count = resp->data_tx_completion.num_msdus;
	msdus = resp->data_tx_completion.msdus;
	rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);

	if (rssi_enabled)
		htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
							    resp);

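	/* When RSSI reporting is enabled the msdus[] array holds msdu_count
	 * MSDU ids followed by an equally sized (plus padding) array of
	 * per-MSDU ack RSSI values, hence the msdu_count based offsets below.
	 */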
	for (i = 0; i < msdu_count; i++) {
		msdu_id = msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		if (rssi_enabled) {
			/* The total number of MSDUs should be even;
			 * if an odd number is sent, the firmware fills
			 * the last msdu id with 0xffff.
			 */
			if (msdu_count & 0x01) {
				msdu_id = msdus[msdu_count + i + 1 + htt_pad];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			} else {
				msdu_id = msdus[msdu_count + i + htt_pad];
				tx_done.ack_rssi = __le16_to_cpu(msdu_id);
			}
		}

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 * Note that with only one concurrent reader and one concurrent
		 * writer, you don't need extra locking to use these macros.
		 */
		if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
			ath10k_txrx_tx_unref(htt, &tx_done);
		} else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}

	if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
		return;

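	/* The PPDU duration records start past the MSDU id array (padded to
	 * an even count), past the RSSI array when present, and past the
	 * optional PPID/PA words.
	 */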
	ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;

	if (rssi_enabled)
		ppdu_info_offset += ppdu_info_offset;

	if (resp->data_tx_completion.flags2 &
	    (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
		ppdu_info_offset += 2;

	ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
	num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
					__le32_to_cpu(ppdu_info->info0));

	for (i = 0; i < num_airtime_records; i++) {
		struct htt_data_tx_ppdu_dur *ppdu_dur;
		u32 info0;

		ppdu_dur = &ppdu_info->ppdu_dur[i];
		info0 = __le32_to_cpu(ppdu_dur->info0);

		peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
				    info0);
		rcu_read_lock();
		spin_lock_bh(&ar->data_lock);

		peer = ath10k_peer_find_by_id(ar, peer_id);
		if (!peer || !peer->sta) {
			spin_unlock_bh(&ar->data_lock);
			rcu_read_unlock();
			continue;
		}

		tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
		      IEEE80211_QOS_CTL_TID_MASK;
		tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);

		ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);

		spin_unlock_bh(&ar->data_lock);
		rcu_read_unlock();
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

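/* Dequeue MSDUs from the list until one with the LAST_MSDU flag set in its
 * rx descriptor is seen, forming one complete A-MSDU. If the list runs out
 * before the flag is found, the A-MSDU is incomplete and the MSDUs are given
 * back to the caller's list.
 */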
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't a multiple of 2 or 4 so
		 * the actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (void *)skb->data;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct sk_buff_head list;
	struct sk_buff_head amsdu;
	u16 peer_id;
	u16 msdu_count;
	u8 vdev_id;
	u8 tid;
	bool offload;
	bool frag;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused)
		return -EIO;

	skb_pull(skb, sizeof(resp->hdr));
	skb_pull(skb, sizeof(resp->rx_in_ord_ind));

	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
	vdev_id = resp->rx_in_ord_ind.vdev_id;
	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
	offload = !!(resp->rx_in_ord_ind.info &
		     HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
		   vdev_id, peer_id, tid, offload, frag, msdu_count);

	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
		ath10k_warn(ar, "dropping invalid in order rx indication\n");
		return -EINVAL;
	}

	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
	 * extracted and processed.
	 */
	__skb_queue_head_init(&list);
	if (ar->hw_params.target_64bit)
		ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
						     &list);
	else
		ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
						     &list);

	if (ret < 0) {
		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
		htt->rx_confused = true;
		return -EIO;
	}

	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
					     NULL, peer_id, frag);
			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
			break;
		case -EAGAIN:
			fallthrough;
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}
	return ret;
}

static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}

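/* In pull mode the firmware requests frames for specific peer/TID queues.
 * For each record the host pushes up to the requested number of MSDUs/bytes
 * from the matching mac80211 txq and reports back how much was actually
 * transmitted.
 */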
ath10k_htt_rx_tx_fetch_ind(struct ath10k * ar,struct sk_buff * skb)3247*4882a593Smuzhiyun static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
3248*4882a593Smuzhiyun {
3249*4882a593Smuzhiyun struct ieee80211_hw *hw = ar->hw;
3250*4882a593Smuzhiyun struct ieee80211_txq *txq;
3251*4882a593Smuzhiyun struct htt_resp *resp = (struct htt_resp *)skb->data;
3252*4882a593Smuzhiyun struct htt_tx_fetch_record *record;
3253*4882a593Smuzhiyun size_t len;
3254*4882a593Smuzhiyun size_t max_num_bytes;
3255*4882a593Smuzhiyun size_t max_num_msdus;
3256*4882a593Smuzhiyun size_t num_bytes;
3257*4882a593Smuzhiyun size_t num_msdus;
3258*4882a593Smuzhiyun const __le32 *resp_ids;
3259*4882a593Smuzhiyun u16 num_records;
3260*4882a593Smuzhiyun u16 num_resp_ids;
3261*4882a593Smuzhiyun u16 peer_id;
3262*4882a593Smuzhiyun u8 tid;
3263*4882a593Smuzhiyun int ret;
3264*4882a593Smuzhiyun int i;
3265*4882a593Smuzhiyun bool may_tx;
3266*4882a593Smuzhiyun
3267*4882a593Smuzhiyun ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
3268*4882a593Smuzhiyun
3269*4882a593Smuzhiyun len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
3270*4882a593Smuzhiyun if (unlikely(skb->len < len)) {
3271*4882a593Smuzhiyun ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
3272*4882a593Smuzhiyun return;
3273*4882a593Smuzhiyun }
3274*4882a593Smuzhiyun
3275*4882a593Smuzhiyun num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
3276*4882a593Smuzhiyun num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
3277*4882a593Smuzhiyun
3278*4882a593Smuzhiyun len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
3279*4882a593Smuzhiyun len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
3280*4882a593Smuzhiyun
3281*4882a593Smuzhiyun if (unlikely(skb->len < len)) {
3282*4882a593Smuzhiyun ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
3283*4882a593Smuzhiyun return;
3284*4882a593Smuzhiyun }
3285*4882a593Smuzhiyun
3286*4882a593Smuzhiyun ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
3287*4882a593Smuzhiyun num_records, num_resp_ids,
3288*4882a593Smuzhiyun le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
3289*4882a593Smuzhiyun
3290*4882a593Smuzhiyun if (!ar->htt.tx_q_state.enabled) {
3291*4882a593Smuzhiyun ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
3292*4882a593Smuzhiyun return;
3293*4882a593Smuzhiyun }
3294*4882a593Smuzhiyun
3295*4882a593Smuzhiyun if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
3296*4882a593Smuzhiyun ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
3297*4882a593Smuzhiyun return;
3298*4882a593Smuzhiyun }
3299*4882a593Smuzhiyun
3300*4882a593Smuzhiyun rcu_read_lock();
3301*4882a593Smuzhiyun
3302*4882a593Smuzhiyun for (i = 0; i < num_records; i++) {
3303*4882a593Smuzhiyun record = &resp->tx_fetch_ind.records[i];
3304*4882a593Smuzhiyun peer_id = MS(le16_to_cpu(record->info),
3305*4882a593Smuzhiyun HTT_TX_FETCH_RECORD_INFO_PEER_ID);
3306*4882a593Smuzhiyun tid = MS(le16_to_cpu(record->info),
3307*4882a593Smuzhiyun HTT_TX_FETCH_RECORD_INFO_TID);
3308*4882a593Smuzhiyun max_num_msdus = le16_to_cpu(record->num_msdus);
3309*4882a593Smuzhiyun max_num_bytes = le32_to_cpu(record->num_bytes);
3310*4882a593Smuzhiyun
3311*4882a593Smuzhiyun ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
3312*4882a593Smuzhiyun i, peer_id, tid, max_num_msdus, max_num_bytes);
3313*4882a593Smuzhiyun
3314*4882a593Smuzhiyun if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
3315*4882a593Smuzhiyun unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
3316*4882a593Smuzhiyun ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
3317*4882a593Smuzhiyun peer_id, tid);
3318*4882a593Smuzhiyun continue;
3319*4882a593Smuzhiyun }
3320*4882a593Smuzhiyun
3321*4882a593Smuzhiyun spin_lock_bh(&ar->data_lock);
3322*4882a593Smuzhiyun txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
3323*4882a593Smuzhiyun spin_unlock_bh(&ar->data_lock);
3324*4882a593Smuzhiyun
3325*4882a593Smuzhiyun /* It is okay to release the lock and use txq because RCU read
3326*4882a593Smuzhiyun * lock is held.
3327*4882a593Smuzhiyun */
3328*4882a593Smuzhiyun
3329*4882a593Smuzhiyun if (unlikely(!txq)) {
3330*4882a593Smuzhiyun ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
3331*4882a593Smuzhiyun peer_id, tid);
3332*4882a593Smuzhiyun continue;
3333*4882a593Smuzhiyun }
3334*4882a593Smuzhiyun
3335*4882a593Smuzhiyun num_msdus = 0;
3336*4882a593Smuzhiyun num_bytes = 0;
3337*4882a593Smuzhiyun
3338*4882a593Smuzhiyun ieee80211_txq_schedule_start(hw, txq->ac);
3339*4882a593Smuzhiyun may_tx = ieee80211_txq_may_transmit(hw, txq);
3340*4882a593Smuzhiyun while (num_msdus < max_num_msdus &&
3341*4882a593Smuzhiyun num_bytes < max_num_bytes) {
3342*4882a593Smuzhiyun if (!may_tx)
3343*4882a593Smuzhiyun break;
3344*4882a593Smuzhiyun
3345*4882a593Smuzhiyun ret = ath10k_mac_tx_push_txq(hw, txq);
3346*4882a593Smuzhiyun if (ret < 0)
3347*4882a593Smuzhiyun break;
3348*4882a593Smuzhiyun
3349*4882a593Smuzhiyun num_msdus++;
3350*4882a593Smuzhiyun num_bytes += ret;
3351*4882a593Smuzhiyun }
3352*4882a593Smuzhiyun ieee80211_return_txq(hw, txq, false);
3353*4882a593Smuzhiyun ieee80211_txq_schedule_end(hw, txq->ac);
3354*4882a593Smuzhiyun
3355*4882a593Smuzhiyun record->num_msdus = cpu_to_le16(num_msdus);
3356*4882a593Smuzhiyun record->num_bytes = cpu_to_le32(num_bytes);
3357*4882a593Smuzhiyun
3358*4882a593Smuzhiyun ath10k_htt_tx_txq_recalc(hw, txq);
3359*4882a593Smuzhiyun }
3360*4882a593Smuzhiyun
3361*4882a593Smuzhiyun rcu_read_unlock();
3362*4882a593Smuzhiyun
3363*4882a593Smuzhiyun resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
3364*4882a593Smuzhiyun ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
3365*4882a593Smuzhiyun
3366*4882a593Smuzhiyun ret = ath10k_htt_tx_fetch_resp(ar,
3367*4882a593Smuzhiyun resp->tx_fetch_ind.token,
3368*4882a593Smuzhiyun resp->tx_fetch_ind.fetch_seq_num,
3369*4882a593Smuzhiyun resp->tx_fetch_ind.records,
3370*4882a593Smuzhiyun num_records);
3371*4882a593Smuzhiyun if (unlikely(ret)) {
3372*4882a593Smuzhiyun ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
3373*4882a593Smuzhiyun le32_to_cpu(resp->tx_fetch_ind.token), ret);
3374*4882a593Smuzhiyun /* FIXME: request fw restart */
3375*4882a593Smuzhiyun }
3376*4882a593Smuzhiyun
3377*4882a593Smuzhiyun ath10k_htt_tx_txq_sync(ar);
3378*4882a593Smuzhiyun }
3379*4882a593Smuzhiyun
ath10k_htt_rx_tx_fetch_confirm(struct ath10k * ar,struct sk_buff * skb)3380*4882a593Smuzhiyun static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
3381*4882a593Smuzhiyun struct sk_buff *skb)
3382*4882a593Smuzhiyun {
3383*4882a593Smuzhiyun const struct htt_resp *resp = (void *)skb->data;
3384*4882a593Smuzhiyun size_t len;
3385*4882a593Smuzhiyun int num_resp_ids;
3386*4882a593Smuzhiyun
3387*4882a593Smuzhiyun ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
3388*4882a593Smuzhiyun
3389*4882a593Smuzhiyun len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
3390*4882a593Smuzhiyun if (unlikely(skb->len < len)) {
3391*4882a593Smuzhiyun ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
3392*4882a593Smuzhiyun return;
3393*4882a593Smuzhiyun }
3394*4882a593Smuzhiyun
3395*4882a593Smuzhiyun num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
3396*4882a593Smuzhiyun len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
3397*4882a593Smuzhiyun
3398*4882a593Smuzhiyun if (unlikely(skb->len < len)) {
3399*4882a593Smuzhiyun ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
3400*4882a593Smuzhiyun return;
3401*4882a593Smuzhiyun }
3402*4882a593Smuzhiyun
3403*4882a593Smuzhiyun ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
3404*4882a593Smuzhiyun resp->tx_fetch_confirm.resp_ids,
3405*4882a593Smuzhiyun num_resp_ids);
3406*4882a593Smuzhiyun }

static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
		return;
	}

	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

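	/* From here the indication is applied: the global tx queue state
	 * picks up the new mode and push threshold, and each per-(peer, tid)
	 * record below refreshes its own num_push_allowed budget.
	 */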
	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}

void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}

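/* Legacy rate table: indexes 0-3 are CCK (1, 2, 5.5 and 11 Mbps, with 5.5
 * stored as 5) and indexes 4-11 are OFDM (6-54 Mbps). 5 GHz carries no CCK
 * rates, which is why the OFDM path in the tx stats update rebases the
 * mac80211 rate index by subtracting 4.
 */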
static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return i;
	}

	ath10k_warn(ar, "Invalid legacy rate %hhd in peer stats\n", rate);
	return -EINVAL;
}

static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
				    struct ath10k_sta *arsta,
				    struct ath10k_per_peer_tx_stats *pstats,
				    s8 legacy_rate_idx)
{
	struct rate_info *txrate = &arsta->txrate;
	struct ath10k_htt_tx_stats *tx_stats;
	int idx, ht_idx, gi, mcs, bw, nss;
	unsigned long flags;

	if (!arsta->tx_stats)
		return;

	tx_stats = arsta->tx_stats;
	flags = txrate->flags;
	gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
	mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
	bw = txrate->bw;
	nss = txrate->nss;
	ht_idx = mcs + (nss - 1) * 8;
	idx = mcs * 8 + 8 * 10 * (nss - 1);
	idx += bw * 2 + gi;

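	/* The flat rate_table index packs 10 MCS slots x 8 (bw, gi)
	 * combinations per spatial stream: idx = mcs * 8 + 80 * (nss - 1) +
	 * bw * 2 + gi. For example MCS 7, NSS 2, 40 MHz, short GI lands at
	 * 7 * 8 + 80 + 2 + 1 = 139.
	 */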
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]

	if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
		STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
	} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
	} else {
		mcs = legacy_rate_idx;

		STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
	}

	if (ATH10K_HW_AMPDU(pstats->flags)) {
		tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);

		if (txrate->flags & RATE_INFO_FLAGS_MCS) {
			STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
				pstats->succ_pkts + pstats->retry_pkts;
		} else {
			STATS_OP_FMT(AMPDU).vht[0][mcs] +=
				pstats->succ_bytes + pstats->retry_bytes;
			STATS_OP_FMT(AMPDU).vht[1][mcs] +=
				pstats->succ_pkts + pstats->retry_pkts;
		}
		STATS_OP_FMT(AMPDU).bw[0][bw] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).gi[0][gi] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
			pstats->succ_bytes + pstats->retry_bytes;
		STATS_OP_FMT(AMPDU).bw[1][bw] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).gi[1][gi] +=
			pstats->succ_pkts + pstats->retry_pkts;
		STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
			pstats->succ_pkts + pstats->retry_pkts;
	} else {
		tx_stats->ack_fails +=
			ATH10K_HW_BA_FAIL(pstats->flags);
	}

	STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
	STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;

	STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
	STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;

	STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
	STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;

	STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
	STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;

	STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
	STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;

	STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
	STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;

	if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
		STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
		STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
		STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
		STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
		STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
		STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
	}

	tx_stats->tx_duration += pstats->duration;
}

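/* Translate the firmware's packed ratecode/flags into a struct rate_info
 * and mirror the result into mac80211's tx rate status; the ATH10K_HW_*
 * accessors below pull the preamble, NSS, MCS and GI fields out of the
 * packed values.
 */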
static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
				struct ieee80211_sta *sta,
				struct ath10k_per_peer_tx_stats *peer_stats)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	struct ieee80211_chanctx_conf *conf = NULL;
	u8 rate = 0, sgi;
	s8 rate_idx = 0;
	bool skip_auto_rate;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);
	skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);

	/* Firmware's rate control skips broadcast/management frames,
	 * frames sent at host-configured fixed rates, and some other
	 * special cases.
	 */
	if (skip_auto_rate)
		return;

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %hhd in peer stats\n",
			    txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd in peer stats\n",
			    txrate.mcs, txrate.nss);
		return;
	}

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
		/* This is hacky, FW sends the CCK rate 5.5 Mbps as 6 */
		if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = 5;
		rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
		if (rate_idx < 0)
			return;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	switch (txrate.flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		if (arsta->arvif && arsta->arvif->vif)
			conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
		if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
			arsta->tx_info.status.rates[0].idx = rate_idx - 4;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->tx_info.status.rates[0].idx = rate_idx;
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
				(IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
				 IEEE80211_TX_RC_SHORT_GI);
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->tx_info.status.rates[0].idx =
				txrate.mcs + ((txrate.nss - 1) * 8);
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_SHORT_GI;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
				       txrate.mcs, txrate.nss);
		if (sgi)
			arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_SHORT_GI;
		arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
		break;
	}

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
	arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	switch (arsta->txrate.bw) {
	case RATE_INFO_BW_40:
		arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_40_MHZ_WIDTH;
		break;
	case RATE_INFO_BW_80:
		arsta->tx_info.status.rates[0].flags |=
				IEEE80211_TX_RC_80_MHZ_WIDTH;
		break;
	}

	if (peer_stats->succ_pkts) {
		arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
		arsta->tx_info.status.rates[0].count = 1;
		ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
	}

	if (ar->htt.disable_tx_comp) {
		arsta->tx_failed += peer_stats->failed_pkts;
		ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
			   arsta->tx_failed);
	}

	arsta->tx_retries += peer_stats->retry_pkts;
	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d\n",
		   arsta->tx_retries);

	if (ath10k_debug_is_extd_tx_stats_enabled(ar))
		ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
						    rate_idx);
}

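/* HTT_T2H_MSG_TYPE_PEER_STATS payload layout: a header carrying num_ppdu
 * and ppdu_len (in 32-bit words), followed by num_ppdu fixed-size PPDU
 * stats records, all for a single peer id.
 */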
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
		p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}

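/* PN length in bits: TKIP and AES-CCMP carry a 48-bit packet number;
 * other security types are treated as having no PN to track here.
 */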
static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
{
	switch (sec_type) {
	case HTT_SECURITY_TKIP:
	case HTT_SECURITY_TKIP_NOMIC:
	case HTT_SECURITY_AES_CCMP:
		return 48;
	default:
		return 0;
	}
}

static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
					  struct htt_security_indication *ev)
{
	enum htt_txrx_sec_cast_type sec_index;
	enum htt_security_types sec_type;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
	if (!peer) {
		ath10k_warn(ar, "failed to find peer id %d for security indication",
			    __le16_to_cpu(ev->peer_id));
		goto out;
	}

	sec_type = MS(ev->flags, HTT_SECURITY_TYPE);

	if (ev->flags & HTT_SECURITY_IS_UNICAST)
		sec_index = HTT_TXRX_SEC_UCAST;
	else
		sec_index = HTT_TXRX_SEC_MCAST;

	peer->rx_pn[sec_index].sec_type = sec_type;
	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);

	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));

out:
	spin_unlock_bh(&ar->data_lock);
}

bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

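	/* Dispatch on the abstract message type: resp->hdr.msg_type is a
	 * firmware-specific value that was just translated through the
	 * per-version t2h_msg_types[] table.
	 */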
	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
			ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
		} else {
			skb_queue_tail(&htt->rx_indication_head, skb);
			return false;
		}
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
					FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
						  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits++;
			spin_unlock_bh(&htc->tx_lock);
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_htt_rx_sec_ind_handler(ar, ev);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);

		return ath10k_htt_rx_proc_rx_frag_ind(htt,
						      &resp->rx_frag_ind,
						      skb);
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
		struct ath10k_htt *htt = &ar->htt;
		struct ath10k_htc *htc = &ar->htc;
		struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
		u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
		int htt_credit_delta;

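		/* The credit delta is sign-magnitude encoded: an absolute
		 * value plus a separate sign bit, hence the explicit negation
		 * below instead of a plain signed field extraction.
		 */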
		htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
		if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
			htt_credit_delta = -htt_credit_delta;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt credit update delta %d\n",
			   htt_credit_delta);

		if (htt->disable_tx_comp) {
			spin_lock_bh(&htc->tx_lock);
			ep->tx_credits += htt_credit_delta;
			spin_unlock_bh(&htc->tx_lock);
			ath10k_dbg(ar, ATH10K_DBG_HTT,
				   "htt credit total %d\n",
				   ep->tx_credits);
			ep->ep_ops.ep_tx_credits(htc->ar);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}
	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}

int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
	struct htt_resp *resp;
	struct ath10k_htt *htt = &ar->htt;
	struct sk_buff *skb;
	bool release;
	int quota;

	for (quota = 0; quota < budget; quota++) {
		skb = skb_dequeue(&htt->rx_indication_head);
		if (!skb)
			break;

		resp = (struct htt_resp *)skb->data;

		release = ath10k_htt_rx_proc_rx_ind_hl(htt,
						       &resp->rx_ind_hl,
						       skb,
						       HTT_RX_PN_CHECK,
						       HTT_RX_NON_TKIP_MIC);

		if (release)
			dev_kfree_skb_any(skb);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
			   skb_queue_len(&htt->rx_indication_head));
	}
	return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);

int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}

	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);

	/* From NAPI documentation:
	 * The napi poll() function may also process TX completions, in which
	 * case if it processes the entire TX ring then it should count that
	 * work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 * Note that with only one concurrent reader and one concurrent writer,
	 * you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);

	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);

static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
	.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};

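/* Three rx_ops variants: the 32-bit and 64-bit tables differ only in the
 * width of the target's rx ring addresses, while high-latency (HL) targets
 * receive frames inside HTT messages rather than via the DMA rx ring and
 * therefore only need the fragment indication hook.
 */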
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->rx_ops = &htt_rx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}