// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"

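/* Complete a pending off-channel tx if this skb is the one currently
 * tracked in ar->offchan_tx_skb; stale completions are only warned about.
 */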
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)))
		return;

	if (ath10k_mac_tx_frm_has_freq(ar))
		return;

	/* If the original wait_for_completion() timed out before
	 * {data,mgmt}_tx_completed() was called then we could complete
	 * offchan_tx_completed for a different skb. Prevent this by using
	 * offchan_tx_skb.
	 */
	spin_lock_bh(&ar->data_lock);
	if (ar->offchan_tx_skb != skb) {
		ath10k_warn(ar, "completed old offchannel frame\n");
		goto out;
	}

	complete(&ar->offchan_tx_completed);
	ar->offchan_tx_skb = NULL; /* just for sanity */

	ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
out:
	spin_unlock_bh(&ar->data_lock);
}

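/* Handle an HTT tx completion: look up the msdu by id, release the msdu id
 * and pending-tx credit, report the estimated airtime, unmap the DMA buffer
 * and hand the frame back to mac80211 with its tx status filled in.
 */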
int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
			 const struct htt_tx_done *tx_done)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info;
	struct ieee80211_txq *txq;
	struct ath10k_skb_cb *skb_cb;
	struct ath10k_txq *artxq;
	struct sk_buff *msdu;
	u8 flags;

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx completion msdu_id %u status %d\n",
		   tx_done->msdu_id, tx_done->status);

	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
			    tx_done->msdu_id);
		return -EINVAL;
	}

	spin_lock_bh(&htt->tx_lock);
	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
	if (!msdu) {
		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
			    tx_done->msdu_id);
		spin_unlock_bh(&htt->tx_lock);
		return -ENOENT;
	}

	skb_cb = ATH10K_SKB_CB(msdu);
	txq = skb_cb->txq;

	if (txq) {
		artxq = (void *)txq->drv_priv;
		artxq->num_fw_queued--;
	}

	flags = skb_cb->flags;
	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
	ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);

	rcu_read_lock();
	if (txq && txq->sta && skb_cb->airtime_est)
		ieee80211_sta_register_airtime(txq->sta, txq->tid,
					       skb_cb->airtime_est, 0);
	rcu_read_unlock();

	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	ath10k_report_offchan_tx(htt->ar, msdu);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));
	info->status.rates[0].idx = -1;

	trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	    !(flags & ATH10K_SKB_F_NOACK_TID))
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
	    ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
	    (flags & ATH10K_SKB_F_NOACK_TID)))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
		if ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
		    (flags & ATH10K_SKB_F_NOACK_TID))
			info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags &= ~IEEE80211_TX_STAT_ACK;
	}

	if (tx_done->status == HTT_TX_COMPL_STATE_ACK &&
	    tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
		info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
						tx_done->ack_rssi;
		info->status.is_valid_ack_signal = true;
	}

	ieee80211_tx_status(htt->ar->hw, msdu);
	/* we do not own the msdu anymore */

	return 0;
}

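/* Look up a peer by vdev id and MAC address. Caller must hold ar->data_lock. */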
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
				     const u8 *addr)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (!ether_addr_equal(peer->addr, addr))
			continue;

		return peer;
	}

	return NULL;
}

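/* Look up a peer by firmware-assigned peer id. Caller must hold ar->data_lock. */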
struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
	struct ath10k_peer *peer;

	if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
		return NULL;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list)
		if (test_bit(peer_id, peer->peer_ids))
			return peer;

	return NULL;
}

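/* Wait up to 3 seconds for a peer to appear in (or disappear from) the peer
 * list. A pending crash flush also satisfies the wait so callers are not
 * stalled across a firmware recovery.
 */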
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	long time_left;

	time_left = wait_event_timeout(ar->peer_mapping_wq, ({
			bool mapped;

			spin_lock_bh(&ar->data_lock);
			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
			spin_unlock_bh(&ar->data_lock);

			(mapped == expect_mapped ||
			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
		}), 3 * HZ);

	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}

int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}

int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}

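/* Handle an HTT peer map event: allocate a peer entry on first mapping (and
 * wake waiters on ar->peer_mapping_wq), then record the peer id in
 * ar->peer_map and in the peer's id bitmap.
 */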
void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
		ath10k_warn(ar,
			    "received htt peer map event with idx out of bounds: %hu\n",
			    ev->peer_id);
		return;
	}

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = ev->vdev_id;
		ether_addr_copy(peer->addr, ev->addr);
		list_add(&peer->list, &ar->peers);
		wake_up(&ar->peer_mapping_wq);
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   ev->vdev_id, ev->addr, ev->peer_id);

	WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
	ar->peer_map[ev->peer_id] = peer;
	set_bit(ev->peer_id, peer->peer_ids);
exit:
	spin_unlock_bh(&ar->data_lock);
}

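/* Handle an HTT peer unmap event: clear the peer id mapping and, once none of
 * the peer's ids remain mapped, free the peer and wake waiters on
 * ar->peer_mapping_wq.
 */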
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
		ath10k_warn(ar,
			    "received htt peer unmap event with idx out of bounds: %hu\n",
			    ev->peer_id);
		return;
	}

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
	if (!peer) {
		ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
			    ev->peer_id);
		goto exit;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, ev->peer_id);

	ar->peer_map[ev->peer_id] = NULL;
	clear_bit(ev->peer_id, peer->peer_ids);

	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
		list_del(&peer->list);
		kfree(peer);
		wake_up(&ar->peer_mapping_wq);
	}

exit:
	spin_unlock_bh(&ar->data_lock);
}