xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/mediatek/mt76/agg-rx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: ISC
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #include "mt76.h"
6*4882a593Smuzhiyun 
/* Pick the reorder-release timeout for a TID.
 *
 * Voice traffic (AC_VO) currently runs without aggregation, so it needs no
 * special handling here. AC_BE/AC_BK use TIDs 0-3; anything above gets the
 * shorter timeout.
 */
static unsigned long mt76_aggr_tid_to_timeo(u8 tidno)
{
	unsigned long divisor = (tidno < 4) ? 10 : 25;

	return HZ / divisor;
}
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun static void
mt76_aggr_release(struct mt76_rx_tid * tid,struct sk_buff_head * frames,int idx)16*4882a593Smuzhiyun mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
17*4882a593Smuzhiyun {
18*4882a593Smuzhiyun 	struct sk_buff *skb;
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun 	tid->head = ieee80211_sn_inc(tid->head);
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun 	skb = tid->reorder_buf[idx];
23*4882a593Smuzhiyun 	if (!skb)
24*4882a593Smuzhiyun 		return;
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun 	tid->reorder_buf[idx] = NULL;
27*4882a593Smuzhiyun 	tid->nframes--;
28*4882a593Smuzhiyun 	__skb_queue_tail(frames, skb);
29*4882a593Smuzhiyun }
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid * tid,struct sk_buff_head * frames,u16 head)32*4882a593Smuzhiyun mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
33*4882a593Smuzhiyun 			    struct sk_buff_head *frames,
34*4882a593Smuzhiyun 			    u16 head)
35*4882a593Smuzhiyun {
36*4882a593Smuzhiyun 	int idx;
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	while (ieee80211_sn_less(tid->head, head)) {
39*4882a593Smuzhiyun 		idx = tid->head % tid->size;
40*4882a593Smuzhiyun 		mt76_aggr_release(tid, frames, idx);
41*4882a593Smuzhiyun 	}
42*4882a593Smuzhiyun }
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun static void
mt76_rx_aggr_release_head(struct mt76_rx_tid * tid,struct sk_buff_head * frames)45*4882a593Smuzhiyun mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun 	int idx = tid->head % tid->size;
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun 	while (tid->reorder_buf[idx]) {
50*4882a593Smuzhiyun 		mt76_aggr_release(tid, frames, idx);
51*4882a593Smuzhiyun 		idx = tid->head % tid->size;
52*4882a593Smuzhiyun 	}
53*4882a593Smuzhiyun }
54*4882a593Smuzhiyun 
/* Scan the reorder buffer for frames that have waited longer than the
 * per-TID timeout and force-release everything up to them. Called with
 * tid->lock held (from the reorder worker).
 */
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	/* Nothing buffered, nothing to do */
	if (!tid->nframes)
		return;

	/* First flush whatever is already contiguous at the window head */
	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	/* Walk the remaining slots; @nframes lets us stop early once every
	 * buffered frame has been visited. A timed-out frame moves the
	 * window head up to its sequence number, releasing (or skipping)
	 * all earlier slots.
	 */
	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		if (!time_after(jiffies,
				status->reorder_time +
				mt76_aggr_tid_to_timeo(tid->num)))
			continue;

		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	/* Releasing timed-out frames may have made more frames contiguous */
	mt76_rx_aggr_release_head(tid, frames);
}
89*4882a593Smuzhiyun 
/* Delayed-work handler: flush timed-out frames for one TID and hand them to
 * the rx completion path. Re-arms itself while frames remain buffered.
 */
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	/* mt76_rx_complete() runs in the normal rx (softirq) context;
	 * disable BHs and enter an RCU read section to match.
	 */
	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	/* Keep the timeout running as long as frames sit behind a hole */
	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     mt76_aggr_tid_to_timeo(tid->num));
	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}
117*4882a593Smuzhiyun 
/* Handle a received control frame: on a BlockAck request (BAR), advance the
 * receive window of the matching TID to the BAR's starting sequence number.
 * Called under RCU from the rx path for frames without the aggr flag.
 */
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u16 seqno;

	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	/* Only BAR frames are of interest here */
	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	/* The TID occupies the top 4 bits of the BAR control field */
	status->tid = le16_to_cpu(bar->control) >> 12;
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	/* Release everything below the BAR's SSN, then whatever became
	 * contiguous at the new head; skip if the session is shutting down.
	 */
	spin_lock_bh(&tid->lock);
	if (!tid->stopped) {
		mt76_rx_aggr_release_frames(tid, frames, seqno);
		mt76_rx_aggr_release_head(tid, frames);
	}
	spin_unlock_bh(&tid->lock);
}
146*4882a593Smuzhiyun 
/* Insert a received frame into the rx reorder machinery for its TID.
 *
 * The frame is queued on @frames immediately and unlinked again only if it
 * must be buffered (out of order) or dropped (duplicate / slot collision).
 * Frames that become deliverable as a result are appended to @frames.
 * Called under RCU from the rx path.
 */
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size, idx;
	u8 ackp;

	/* Optimistically queue for delivery; unlinked below if buffered */
	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	/* Non-aggregated frames may still be BARs that move the window */
	if (!status->aggr) {
		mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* not part of a BA session */
	ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		return;

	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		/* Ignore frames that predate the session's starting SSN */
		if (sn_less)
			goto out;

		tid->started = true;
	}

	/* Older than the window head: duplicate or stale retransmission */
	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	/* In-order frame: advance the head and flush what became contiguous */
	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	/* Buffer the out-of-order frame and arm the timeout worker so it
	 * cannot get stuck behind a permanently missing sequence number.
	 */
	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     mt76_aggr_tid_to_timeo(tid->num));

out:
	spin_unlock_bh(&tid->lock);
}
240*4882a593Smuzhiyun 
/* Start an rx BA session for @tidno on @wcid with starting sequence number
 * @ssn and reorder window @size. Any previous session on the TID is torn
 * down first. Returns 0 or -ENOMEM.
 */
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u16 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	spin_lock_init(&tid->lock);
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	tid->dev = dev;
	tid->num = tidno;
	tid->head = ssn;
	tid->size = size;

	/* Publish only once the structure is fully initialized */
	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);
264*4882a593Smuzhiyun 
mt76_rx_aggr_shutdown(struct mt76_dev * dev,struct mt76_rx_tid * tid)265*4882a593Smuzhiyun static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	u16 size = tid->size;
268*4882a593Smuzhiyun 	int i;
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 	spin_lock_bh(&tid->lock);
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	tid->stopped = true;
273*4882a593Smuzhiyun 	for (i = 0; tid->nframes && i < size; i++) {
274*4882a593Smuzhiyun 		struct sk_buff *skb = tid->reorder_buf[i];
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 		if (!skb)
277*4882a593Smuzhiyun 			continue;
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 		tid->reorder_buf[i] = NULL;
280*4882a593Smuzhiyun 		tid->nframes--;
281*4882a593Smuzhiyun 		dev_kfree_skb(skb);
282*4882a593Smuzhiyun 	}
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	spin_unlock_bh(&tid->lock);
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	cancel_delayed_work_sync(&tid->reorder_work);
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun 
/* Tear down the rx BA session for @tidno on @wcid, if one exists */
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid;

	/* Unpublish the pointer first so the rx path stops finding it */
	tid = rcu_replace_pointer(wcid->aggr[tidno], NULL,
				  lockdep_is_held(&dev->mutex));
	if (!tid)
		return;

	mt76_rx_aggr_shutdown(dev, tid);
	kfree_rcu(tid, rcu_head);
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
301