xref: /OK3568_Linux_fs/kernel/net/mac80211/sta_info.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2002-2005, Instant802 Networks, Inc.
4*4882a593Smuzhiyun  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
5*4882a593Smuzhiyun  * Copyright 2013-2014  Intel Mobile Communications GmbH
6*4882a593Smuzhiyun  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
7*4882a593Smuzhiyun  * Copyright (C) 2018-2021 Intel Corporation
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/init.h>
12*4882a593Smuzhiyun #include <linux/etherdevice.h>
13*4882a593Smuzhiyun #include <linux/netdevice.h>
14*4882a593Smuzhiyun #include <linux/types.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/skbuff.h>
17*4882a593Smuzhiyun #include <linux/if_arp.h>
18*4882a593Smuzhiyun #include <linux/timer.h>
19*4882a593Smuzhiyun #include <linux/rtnetlink.h>
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #include <net/codel.h>
22*4882a593Smuzhiyun #include <net/mac80211.h>
23*4882a593Smuzhiyun #include "ieee80211_i.h"
24*4882a593Smuzhiyun #include "driver-ops.h"
25*4882a593Smuzhiyun #include "rate.h"
26*4882a593Smuzhiyun #include "sta_info.h"
27*4882a593Smuzhiyun #include "debugfs_sta.h"
28*4882a593Smuzhiyun #include "mesh.h"
29*4882a593Smuzhiyun #include "wme.h"
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun /**
32*4882a593Smuzhiyun  * DOC: STA information lifetime rules
33*4882a593Smuzhiyun  *
34*4882a593Smuzhiyun  * STA info structures (&struct sta_info) are managed in a hash table
35*4882a593Smuzhiyun  * for faster lookup and a list for iteration. They are managed using
36*4882a593Smuzhiyun  * RCU, i.e. access to the list and hash table is protected by RCU.
37*4882a593Smuzhiyun  *
38*4882a593Smuzhiyun  * Upon allocating a STA info structure with sta_info_alloc(), the caller
39*4882a593Smuzhiyun  * owns that structure. It must then insert it into the hash table using
40*4882a593Smuzhiyun  * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
41*4882a593Smuzhiyun  * case (which acquires an rcu read section but must not be called from
42*4882a593Smuzhiyun  * within one) will the pointer still be valid after the call. Note that
43*4882a593Smuzhiyun  * the caller may not do much with the STA info before inserting it; in
44*4882a593Smuzhiyun  * particular, it may not start any mesh peer link management or add
45*4882a593Smuzhiyun  * encryption keys.
46*4882a593Smuzhiyun  *
47*4882a593Smuzhiyun  * When the insertion fails (sta_info_insert() returns non-zero), the
48*4882a593Smuzhiyun  * structure will have been freed by sta_info_insert()!
49*4882a593Smuzhiyun  *
50*4882a593Smuzhiyun  * Station entries are added by mac80211 when you establish a link with a
51*4882a593Smuzhiyun  * peer. This means different things for the different types of interfaces
52*4882a593Smuzhiyun  * we support. For a regular station this means we add the AP sta when we
53*4882a593Smuzhiyun  * receive an association response from the AP. For IBSS this occurs when
54*4882a593Smuzhiyun  * we get to know about a peer on the same IBSS. For WDS we add the sta for
55*4882a593Smuzhiyun  * the peer immediately upon device open. When using AP mode we add a
56*4882a593Smuzhiyun  * station entry for each peer upon request from userspace through nl80211.
57*4882a593Smuzhiyun  *
58*4882a593Smuzhiyun  * In order to remove a STA info structure, various sta_info_destroy_*()
59*4882a593Smuzhiyun  * calls are available.
60*4882a593Smuzhiyun  *
61*4882a593Smuzhiyun  * There is no concept of ownership on a STA entry, each structure is
62*4882a593Smuzhiyun  * owned by the global hash table/list until it is removed. All users of
63*4882a593Smuzhiyun  * the structure need to be RCU protected so that the structure won't be
64*4882a593Smuzhiyun  * freed before they are done using it.
65*4882a593Smuzhiyun  */
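
/*
 * A minimal sketch of the caller pattern the lifetime rules above describe.
 * Only sta_info_alloc() and sta_info_insert() are actual mac80211 symbols
 * here; "peer_addr" and the surrounding function are placeholders.
 *
 *	struct sta_info *sta;
 *	int err;
 *
 *	sta = sta_info_alloc(sdata, peer_addr, GFP_KERNEL);
 *	if (!sta)
 *		return -ENOMEM;
 *
 *	(only minimal setup may happen here: no keys, no mesh peer links)
 *
 *	err = sta_info_insert(sta);
 *	if (err)
 *		return err;	(on failure sta has already been freed)
 */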
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun static const struct rhashtable_params sta_rht_params = {
68*4882a593Smuzhiyun 	.nelem_hint = 3, /* start small */
69*4882a593Smuzhiyun 	.automatic_shrinking = true,
70*4882a593Smuzhiyun 	.head_offset = offsetof(struct sta_info, hash_node),
71*4882a593Smuzhiyun 	.key_offset = offsetof(struct sta_info, addr),
72*4882a593Smuzhiyun 	.key_len = ETH_ALEN,
73*4882a593Smuzhiyun 	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
74*4882a593Smuzhiyun };
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun /* Caller must hold local->sta_mtx */
77*4882a593Smuzhiyun static int sta_info_hash_del(struct ieee80211_local *local,
78*4882a593Smuzhiyun 			     struct sta_info *sta)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun 	return rhltable_remove(&local->sta_hash, &sta->hash_node,
81*4882a593Smuzhiyun 			       sta_rht_params);
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun static void __cleanup_single_sta(struct sta_info *sta)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun 	int ac, i;
87*4882a593Smuzhiyun 	struct tid_ampdu_tx *tid_tx;
88*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
89*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
90*4882a593Smuzhiyun 	struct ps_data *ps;
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
93*4882a593Smuzhiyun 	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
94*4882a593Smuzhiyun 	    test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
95*4882a593Smuzhiyun 		if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
96*4882a593Smuzhiyun 		    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
97*4882a593Smuzhiyun 			ps = &sdata->bss->ps;
98*4882a593Smuzhiyun 		else if (ieee80211_vif_is_mesh(&sdata->vif))
99*4882a593Smuzhiyun 			ps = &sdata->u.mesh.ps;
100*4882a593Smuzhiyun 		else
101*4882a593Smuzhiyun 			return;
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 		clear_sta_flag(sta, WLAN_STA_PS_STA);
104*4882a593Smuzhiyun 		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
105*4882a593Smuzhiyun 		clear_sta_flag(sta, WLAN_STA_PS_DELIVER);
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 		atomic_dec(&ps->num_sta_ps);
108*4882a593Smuzhiyun 	}
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	if (sta->sta.txq[0]) {
111*4882a593Smuzhiyun 		for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
112*4882a593Smuzhiyun 			struct txq_info *txqi;
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 			if (!sta->sta.txq[i])
115*4882a593Smuzhiyun 				continue;
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 			txqi = to_txq_info(sta->sta.txq[i]);
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 			ieee80211_txq_purge(local, txqi);
120*4882a593Smuzhiyun 		}
121*4882a593Smuzhiyun 	}
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
124*4882a593Smuzhiyun 		local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
125*4882a593Smuzhiyun 		ieee80211_purge_tx_queue(&local->hw, &sta->ps_tx_buf[ac]);
126*4882a593Smuzhiyun 		ieee80211_purge_tx_queue(&local->hw, &sta->tx_filtered[ac]);
127*4882a593Smuzhiyun 	}
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	if (ieee80211_vif_is_mesh(&sdata->vif))
130*4882a593Smuzhiyun 		mesh_sta_cleanup(sta);
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun 	cancel_work_sync(&sta->drv_deliver_wk);
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	/*
135*4882a593Smuzhiyun 	 * Destroy aggregation state here. It would be nice to wait for the
136*4882a593Smuzhiyun 	 * driver to finish aggregation stop and then clean up, but for now
137*4882a593Smuzhiyun 	 * drivers have to handle aggregation stop being requested, followed
138*4882a593Smuzhiyun 	 * directly by station destruction.
139*4882a593Smuzhiyun 	 */
140*4882a593Smuzhiyun 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
141*4882a593Smuzhiyun 		kfree(sta->ampdu_mlme.tid_start_tx[i]);
142*4882a593Smuzhiyun 		tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
143*4882a593Smuzhiyun 		if (!tid_tx)
144*4882a593Smuzhiyun 			continue;
145*4882a593Smuzhiyun 		ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
146*4882a593Smuzhiyun 		kfree(tid_tx);
147*4882a593Smuzhiyun 	}
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun static void cleanup_single_sta(struct sta_info *sta)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
153*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	__cleanup_single_sta(sta);
156*4882a593Smuzhiyun 	sta_info_free(local, sta);
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
160*4882a593Smuzhiyun 					 const u8 *addr)
161*4882a593Smuzhiyun {
162*4882a593Smuzhiyun 	return rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun /* protected by RCU */
166*4882a593Smuzhiyun struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
167*4882a593Smuzhiyun 			      const u8 *addr)
168*4882a593Smuzhiyun {
169*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
170*4882a593Smuzhiyun 	struct rhlist_head *tmp;
171*4882a593Smuzhiyun 	struct sta_info *sta;
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	rcu_read_lock();
174*4882a593Smuzhiyun 	for_each_sta_info(local, addr, sta, tmp) {
175*4882a593Smuzhiyun 		if (sta->sdata == sdata) {
176*4882a593Smuzhiyun 			rcu_read_unlock();
177*4882a593Smuzhiyun 			/* this is safe as the caller must already hold
178*4882a593Smuzhiyun 			 * another rcu read section or the mutex
179*4882a593Smuzhiyun 			 */
180*4882a593Smuzhiyun 			return sta;
181*4882a593Smuzhiyun 		}
182*4882a593Smuzhiyun 	}
183*4882a593Smuzhiyun 	rcu_read_unlock();
184*4882a593Smuzhiyun 	return NULL;
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun /*
188*4882a593Smuzhiyun  * Get sta info either from the specified interface
189*4882a593Smuzhiyun  * or from one of its vlans
190*4882a593Smuzhiyun  */
191*4882a593Smuzhiyun struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
192*4882a593Smuzhiyun 				  const u8 *addr)
193*4882a593Smuzhiyun {
194*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
195*4882a593Smuzhiyun 	struct rhlist_head *tmp;
196*4882a593Smuzhiyun 	struct sta_info *sta;
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	rcu_read_lock();
199*4882a593Smuzhiyun 	for_each_sta_info(local, addr, sta, tmp) {
200*4882a593Smuzhiyun 		if (sta->sdata == sdata ||
201*4882a593Smuzhiyun 		    (sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
202*4882a593Smuzhiyun 			rcu_read_unlock();
203*4882a593Smuzhiyun 			/* this is safe as the caller must already hold
204*4882a593Smuzhiyun 			 * another rcu read section or the mutex
205*4882a593Smuzhiyun 			 */
206*4882a593Smuzhiyun 			return sta;
207*4882a593Smuzhiyun 		}
208*4882a593Smuzhiyun 	}
209*4882a593Smuzhiyun 	rcu_read_unlock();
210*4882a593Smuzhiyun 	return NULL;
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun struct sta_info *sta_info_get_by_addrs(struct ieee80211_local *local,
214*4882a593Smuzhiyun 				       const u8 *sta_addr, const u8 *vif_addr)
215*4882a593Smuzhiyun {
216*4882a593Smuzhiyun 	struct rhlist_head *tmp;
217*4882a593Smuzhiyun 	struct sta_info *sta;
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	for_each_sta_info(local, sta_addr, sta, tmp) {
220*4882a593Smuzhiyun 		if (ether_addr_equal(vif_addr, sta->sdata->vif.addr))
221*4882a593Smuzhiyun 			return sta;
222*4882a593Smuzhiyun 	}
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	return NULL;
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
228*4882a593Smuzhiyun 				     int idx)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
231*4882a593Smuzhiyun 	struct sta_info *sta;
232*4882a593Smuzhiyun 	int i = 0;
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	list_for_each_entry_rcu(sta, &local->sta_list, list,
235*4882a593Smuzhiyun 				lockdep_is_held(&local->sta_mtx)) {
236*4882a593Smuzhiyun 		if (sdata != sta->sdata)
237*4882a593Smuzhiyun 			continue;
238*4882a593Smuzhiyun 		if (i < idx) {
239*4882a593Smuzhiyun 			++i;
240*4882a593Smuzhiyun 			continue;
241*4882a593Smuzhiyun 		}
242*4882a593Smuzhiyun 		return sta;
243*4882a593Smuzhiyun 	}
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	return NULL;
246*4882a593Smuzhiyun }
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun /**
249*4882a593Smuzhiyun  * sta_info_free - free STA
250*4882a593Smuzhiyun  *
251*4882a593Smuzhiyun  * @local: pointer to the global information
252*4882a593Smuzhiyun  * @sta: STA info to free
253*4882a593Smuzhiyun  *
254*4882a593Smuzhiyun  * This function must undo everything done by sta_info_alloc()
255*4882a593Smuzhiyun  * that may happen before sta_info_insert(). It may only be
256*4882a593Smuzhiyun  * called when sta_info_insert() has not been attempted (and
257*4882a593Smuzhiyun  * if that fails, the station is freed anyway.)
258*4882a593Smuzhiyun  */
259*4882a593Smuzhiyun void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
260*4882a593Smuzhiyun {
261*4882a593Smuzhiyun 	/*
262*4882a593Smuzhiyun 	 * If we had used sta_info_pre_move_state() then we might not
263*4882a593Smuzhiyun 	 * have gone through the state transitions down again, so do
264*4882a593Smuzhiyun 	 * it here now (and warn if it's inserted).
265*4882a593Smuzhiyun 	 *
266*4882a593Smuzhiyun 	 * This will clear state such as fast TX/RX that may have been
267*4882a593Smuzhiyun 	 * allocated during state transitions.
268*4882a593Smuzhiyun 	 */
269*4882a593Smuzhiyun 	while (sta->sta_state > IEEE80211_STA_NONE) {
270*4882a593Smuzhiyun 		int ret;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 		WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 		ret = sta_info_move_state(sta, sta->sta_state - 1);
275*4882a593Smuzhiyun 		if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret))
276*4882a593Smuzhiyun 			break;
277*4882a593Smuzhiyun 	}
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	if (sta->rate_ctrl)
280*4882a593Smuzhiyun 		rate_control_free_sta(sta);
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	if (sta->sta.txq[0])
285*4882a593Smuzhiyun 		kfree(to_txq_info(sta->sta.txq[0]));
286*4882a593Smuzhiyun 	kfree(rcu_dereference_raw(sta->sta.rates));
287*4882a593Smuzhiyun #ifdef CONFIG_MAC80211_MESH
288*4882a593Smuzhiyun 	kfree(sta->mesh);
289*4882a593Smuzhiyun #endif
290*4882a593Smuzhiyun 	free_percpu(sta->pcpu_rx_stats);
291*4882a593Smuzhiyun 	kfree(sta);
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun /* Caller must hold local->sta_mtx */
295*4882a593Smuzhiyun static int sta_info_hash_add(struct ieee80211_local *local,
296*4882a593Smuzhiyun 			     struct sta_info *sta)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun 	return rhltable_insert(&local->sta_hash, &sta->hash_node,
299*4882a593Smuzhiyun 			       sta_rht_params);
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun static void sta_deliver_ps_frames(struct work_struct *wk)
303*4882a593Smuzhiyun {
304*4882a593Smuzhiyun 	struct sta_info *sta;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	sta = container_of(wk, struct sta_info, drv_deliver_wk);
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	if (sta->dead)
309*4882a593Smuzhiyun 		return;
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	local_bh_disable();
312*4882a593Smuzhiyun 	if (!test_sta_flag(sta, WLAN_STA_PS_STA))
313*4882a593Smuzhiyun 		ieee80211_sta_ps_deliver_wakeup(sta);
314*4882a593Smuzhiyun 	else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
315*4882a593Smuzhiyun 		ieee80211_sta_ps_deliver_poll_response(sta);
316*4882a593Smuzhiyun 	else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD))
317*4882a593Smuzhiyun 		ieee80211_sta_ps_deliver_uapsd(sta);
318*4882a593Smuzhiyun 	local_bh_enable();
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun static int sta_prepare_rate_control(struct ieee80211_local *local,
322*4882a593Smuzhiyun 				    struct sta_info *sta, gfp_t gfp)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun 	if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
325*4882a593Smuzhiyun 		return 0;
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun 	sta->rate_ctrl = local->rate_ctrl;
328*4882a593Smuzhiyun 	sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
329*4882a593Smuzhiyun 						     sta, gfp);
330*4882a593Smuzhiyun 	if (!sta->rate_ctrl_priv)
331*4882a593Smuzhiyun 		return -ENOMEM;
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	return 0;
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
337*4882a593Smuzhiyun 				const u8 *addr, gfp_t gfp)
338*4882a593Smuzhiyun {
339*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
340*4882a593Smuzhiyun 	struct ieee80211_hw *hw = &local->hw;
341*4882a593Smuzhiyun 	struct sta_info *sta;
342*4882a593Smuzhiyun 	int i;
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
345*4882a593Smuzhiyun 	if (!sta)
346*4882a593Smuzhiyun 		return NULL;
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	if (ieee80211_hw_check(hw, USES_RSS)) {
349*4882a593Smuzhiyun 		sta->pcpu_rx_stats =
350*4882a593Smuzhiyun 			alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
351*4882a593Smuzhiyun 		if (!sta->pcpu_rx_stats)
352*4882a593Smuzhiyun 			goto free;
353*4882a593Smuzhiyun 	}
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun 	spin_lock_init(&sta->lock);
356*4882a593Smuzhiyun 	spin_lock_init(&sta->ps_lock);
357*4882a593Smuzhiyun 	INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
358*4882a593Smuzhiyun 	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
359*4882a593Smuzhiyun 	mutex_init(&sta->ampdu_mlme.mtx);
360*4882a593Smuzhiyun #ifdef CONFIG_MAC80211_MESH
361*4882a593Smuzhiyun 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
362*4882a593Smuzhiyun 		sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
363*4882a593Smuzhiyun 		if (!sta->mesh)
364*4882a593Smuzhiyun 			goto free;
365*4882a593Smuzhiyun 		sta->mesh->plink_sta = sta;
366*4882a593Smuzhiyun 		spin_lock_init(&sta->mesh->plink_lock);
367*4882a593Smuzhiyun 		if (ieee80211_vif_is_mesh(&sdata->vif) &&
368*4882a593Smuzhiyun 		    !sdata->u.mesh.user_mpm)
369*4882a593Smuzhiyun 			timer_setup(&sta->mesh->plink_timer, mesh_plink_timer,
370*4882a593Smuzhiyun 				    0);
371*4882a593Smuzhiyun 		sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
372*4882a593Smuzhiyun 	}
373*4882a593Smuzhiyun #endif
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun 	memcpy(sta->addr, addr, ETH_ALEN);
376*4882a593Smuzhiyun 	memcpy(sta->sta.addr, addr, ETH_ALEN);
377*4882a593Smuzhiyun 	sta->sta.max_rx_aggregation_subframes =
378*4882a593Smuzhiyun 		local->hw.max_rx_aggregation_subframes;
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun 	/* Extended Key ID needs to install keys for keyid 0 and 1 Rx-only.
381*4882a593Smuzhiyun 	 * The Tx path starts to use a key as soon as the key slot that
382*4882a593Smuzhiyun 	 * ptk_idx references is not NULL. To avoid using the initial Rx-only
383*4882a593Smuzhiyun 	 * key prematurely for Tx, initialize ptk_idx to an impossible PTK
384*4882a593Smuzhiyun 	 * keyid, which will always refer to a NULL key.
385*4882a593Smuzhiyun 	 */
386*4882a593Smuzhiyun 	BUILD_BUG_ON(ARRAY_SIZE(sta->ptk) <= INVALID_PTK_KEYIDX);
387*4882a593Smuzhiyun 	sta->ptk_idx = INVALID_PTK_KEYIDX;
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 	sta->local = local;
390*4882a593Smuzhiyun 	sta->sdata = sdata;
391*4882a593Smuzhiyun 	sta->rx_stats.last_rx = jiffies;
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun 	u64_stats_init(&sta->rx_stats.syncp);
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 	ieee80211_init_frag_cache(&sta->frags);
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	sta->sta_state = IEEE80211_STA_NONE;
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 	/* Mark TID as unreserved */
400*4882a593Smuzhiyun 	sta->reserved_tid = IEEE80211_TID_UNRESERVED;
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	sta->last_connected = ktime_get_seconds();
403*4882a593Smuzhiyun 	ewma_signal_init(&sta->rx_stats_avg.signal);
404*4882a593Smuzhiyun 	ewma_avg_signal_init(&sta->status_stats.avg_ack_signal);
405*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(sta->rx_stats_avg.chain_signal); i++)
406*4882a593Smuzhiyun 		ewma_signal_init(&sta->rx_stats_avg.chain_signal[i]);
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	if (local->ops->wake_tx_queue) {
409*4882a593Smuzhiyun 		void *txq_data;
410*4882a593Smuzhiyun 		int size = sizeof(struct txq_info) +
411*4882a593Smuzhiyun 			   ALIGN(hw->txq_data_size, sizeof(void *));
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 		txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
414*4882a593Smuzhiyun 		if (!txq_data)
415*4882a593Smuzhiyun 			goto free;
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 		for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
418*4882a593Smuzhiyun 			struct txq_info *txq = txq_data + i * size;
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun 			/* might not do anything for the bufferable MMPDU TXQ */
421*4882a593Smuzhiyun 			ieee80211_txq_init(sdata, sta, txq, i);
422*4882a593Smuzhiyun 		}
423*4882a593Smuzhiyun 	}
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	if (sta_prepare_rate_control(local, sta, gfp))
426*4882a593Smuzhiyun 		goto free_txq;
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun 	sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT;
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
431*4882a593Smuzhiyun 		skb_queue_head_init(&sta->ps_tx_buf[i]);
432*4882a593Smuzhiyun 		skb_queue_head_init(&sta->tx_filtered[i]);
433*4882a593Smuzhiyun 		sta->airtime[i].deficit = sta->airtime_weight;
434*4882a593Smuzhiyun 		atomic_set(&sta->airtime[i].aql_tx_pending, 0);
435*4882a593Smuzhiyun 		sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i];
436*4882a593Smuzhiyun 		sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i];
437*4882a593Smuzhiyun 	}
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
440*4882a593Smuzhiyun 		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun 	for (i = 0; i < NUM_NL80211_BANDS; i++) {
443*4882a593Smuzhiyun 		u32 mandatory = 0;
444*4882a593Smuzhiyun 		int r;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 		if (!hw->wiphy->bands[i])
447*4882a593Smuzhiyun 			continue;
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 		switch (i) {
450*4882a593Smuzhiyun 		case NL80211_BAND_2GHZ:
451*4882a593Smuzhiyun 			/*
452*4882a593Smuzhiyun 			 * We use both here, even if we cannot really know for
453*4882a593Smuzhiyun 			 * sure the station will support both, but the only use
454*4882a593Smuzhiyun 			 * for this is when we don't know anything yet and send
455*4882a593Smuzhiyun 			 * management frames, and then we'll pick the lowest
456*4882a593Smuzhiyun 			 * possible rate anyway.
457*4882a593Smuzhiyun 			 * If we don't include _G here, we cannot find a rate
458*4882a593Smuzhiyun 			 * in P2P, and thus trigger the WARN_ONCE() in rate.c
459*4882a593Smuzhiyun 			 */
460*4882a593Smuzhiyun 			mandatory = IEEE80211_RATE_MANDATORY_B |
461*4882a593Smuzhiyun 				    IEEE80211_RATE_MANDATORY_G;
462*4882a593Smuzhiyun 			break;
463*4882a593Smuzhiyun 		case NL80211_BAND_5GHZ:
464*4882a593Smuzhiyun 			mandatory = IEEE80211_RATE_MANDATORY_A;
465*4882a593Smuzhiyun 			break;
466*4882a593Smuzhiyun 		case NL80211_BAND_60GHZ:
467*4882a593Smuzhiyun 			WARN_ON(1);
468*4882a593Smuzhiyun 			mandatory = 0;
469*4882a593Smuzhiyun 			break;
470*4882a593Smuzhiyun 		}
471*4882a593Smuzhiyun 
472*4882a593Smuzhiyun 		for (r = 0; r < hw->wiphy->bands[i]->n_bitrates; r++) {
473*4882a593Smuzhiyun 			struct ieee80211_rate *rate;
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 			rate = &hw->wiphy->bands[i]->bitrates[r];
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun 			if (!(rate->flags & mandatory))
478*4882a593Smuzhiyun 				continue;
479*4882a593Smuzhiyun 			sta->sta.supp_rates[i] |= BIT(r);
480*4882a593Smuzhiyun 		}
481*4882a593Smuzhiyun 	}
482*4882a593Smuzhiyun 
483*4882a593Smuzhiyun 	sta->sta.smps_mode = IEEE80211_SMPS_OFF;
484*4882a593Smuzhiyun 	if (sdata->vif.type == NL80211_IFTYPE_AP ||
485*4882a593Smuzhiyun 	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
486*4882a593Smuzhiyun 		struct ieee80211_supported_band *sband;
487*4882a593Smuzhiyun 		u8 smps;
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 		sband = ieee80211_get_sband(sdata);
490*4882a593Smuzhiyun 		if (!sband)
491*4882a593Smuzhiyun 			goto free_txq;
492*4882a593Smuzhiyun 
493*4882a593Smuzhiyun 		smps = (sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >>
494*4882a593Smuzhiyun 			IEEE80211_HT_CAP_SM_PS_SHIFT;
495*4882a593Smuzhiyun 		/*
496*4882a593Smuzhiyun 		 * Assume that hostapd advertises our caps in the beacon and
497*4882a593Smuzhiyun 		 * this is the known_smps_mode for a station that just associated
498*4882a593Smuzhiyun 		 */
499*4882a593Smuzhiyun 		switch (smps) {
500*4882a593Smuzhiyun 		case WLAN_HT_SMPS_CONTROL_DISABLED:
501*4882a593Smuzhiyun 			sta->known_smps_mode = IEEE80211_SMPS_OFF;
502*4882a593Smuzhiyun 			break;
503*4882a593Smuzhiyun 		case WLAN_HT_SMPS_CONTROL_STATIC:
504*4882a593Smuzhiyun 			sta->known_smps_mode = IEEE80211_SMPS_STATIC;
505*4882a593Smuzhiyun 			break;
506*4882a593Smuzhiyun 		case WLAN_HT_SMPS_CONTROL_DYNAMIC:
507*4882a593Smuzhiyun 			sta->known_smps_mode = IEEE80211_SMPS_DYNAMIC;
508*4882a593Smuzhiyun 			break;
509*4882a593Smuzhiyun 		default:
510*4882a593Smuzhiyun 			WARN_ON(1);
511*4882a593Smuzhiyun 		}
512*4882a593Smuzhiyun 	}
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 	sta->sta.max_rc_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 	sta->cparams.ce_threshold = CODEL_DISABLED_THRESHOLD;
517*4882a593Smuzhiyun 	sta->cparams.target = MS2TIME(20);
518*4882a593Smuzhiyun 	sta->cparams.interval = MS2TIME(100);
519*4882a593Smuzhiyun 	sta->cparams.ecn = true;
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
522*4882a593Smuzhiyun 
523*4882a593Smuzhiyun 	return sta;
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun free_txq:
526*4882a593Smuzhiyun 	if (sta->sta.txq[0])
527*4882a593Smuzhiyun 		kfree(to_txq_info(sta->sta.txq[0]));
528*4882a593Smuzhiyun free:
529*4882a593Smuzhiyun 	free_percpu(sta->pcpu_rx_stats);
530*4882a593Smuzhiyun #ifdef CONFIG_MAC80211_MESH
531*4882a593Smuzhiyun 	kfree(sta->mesh);
532*4882a593Smuzhiyun #endif
533*4882a593Smuzhiyun 	kfree(sta);
534*4882a593Smuzhiyun 	return NULL;
535*4882a593Smuzhiyun }
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun static int sta_info_insert_check(struct sta_info *sta)
538*4882a593Smuzhiyun {
539*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 	/*
542*4882a593Smuzhiyun 	 * Can't be a WARN_ON because it can be triggered through a race:
543*4882a593Smuzhiyun 	 * something inserts a STA (on one CPU) without holding the RTNL
544*4882a593Smuzhiyun 	 * and another CPU turns off the net device.
545*4882a593Smuzhiyun 	 */
546*4882a593Smuzhiyun 	if (unlikely(!ieee80211_sdata_running(sdata)))
547*4882a593Smuzhiyun 		return -ENETDOWN;
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun 	if (WARN_ON(ether_addr_equal(sta->sta.addr, sdata->vif.addr) ||
550*4882a593Smuzhiyun 		    is_multicast_ether_addr(sta->sta.addr)))
551*4882a593Smuzhiyun 		return -EINVAL;
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	/* The RCU read lock is required by rhashtable due to
554*4882a593Smuzhiyun 	 * asynchronous resize/rehash.  We also require the mutex
555*4882a593Smuzhiyun 	 * for correctness.
556*4882a593Smuzhiyun 	 */
557*4882a593Smuzhiyun 	rcu_read_lock();
558*4882a593Smuzhiyun 	lockdep_assert_held(&sdata->local->sta_mtx);
559*4882a593Smuzhiyun 	if (ieee80211_hw_check(&sdata->local->hw, NEEDS_UNIQUE_STA_ADDR) &&
560*4882a593Smuzhiyun 	    ieee80211_find_sta_by_ifaddr(&sdata->local->hw, sta->addr, NULL)) {
561*4882a593Smuzhiyun 		rcu_read_unlock();
562*4882a593Smuzhiyun 		return -ENOTUNIQ;
563*4882a593Smuzhiyun 	}
564*4882a593Smuzhiyun 	rcu_read_unlock();
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun 	return 0;
567*4882a593Smuzhiyun }
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun static int sta_info_insert_drv_state(struct ieee80211_local *local,
570*4882a593Smuzhiyun 				     struct ieee80211_sub_if_data *sdata,
571*4882a593Smuzhiyun 				     struct sta_info *sta)
572*4882a593Smuzhiyun {
573*4882a593Smuzhiyun 	enum ieee80211_sta_state state;
574*4882a593Smuzhiyun 	int err = 0;
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	for (state = IEEE80211_STA_NOTEXIST; state < sta->sta_state; state++) {
577*4882a593Smuzhiyun 		err = drv_sta_state(local, sdata, sta, state, state + 1);
578*4882a593Smuzhiyun 		if (err)
579*4882a593Smuzhiyun 			break;
580*4882a593Smuzhiyun 	}
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	if (!err) {
583*4882a593Smuzhiyun 		/*
584*4882a593Smuzhiyun 		 * Drivers using legacy sta_add/sta_remove callbacks only
585*4882a593Smuzhiyun 		 * get uploaded set to true after sta_add is called.
586*4882a593Smuzhiyun 		 */
587*4882a593Smuzhiyun 		if (!local->ops->sta_add)
588*4882a593Smuzhiyun 			sta->uploaded = true;
589*4882a593Smuzhiyun 		return 0;
590*4882a593Smuzhiyun 	}
591*4882a593Smuzhiyun 
592*4882a593Smuzhiyun 	if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
593*4882a593Smuzhiyun 		sdata_info(sdata,
594*4882a593Smuzhiyun 			   "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
595*4882a593Smuzhiyun 			   sta->sta.addr, state + 1, err);
596*4882a593Smuzhiyun 		err = 0;
597*4882a593Smuzhiyun 	}
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	/* unwind on error */
600*4882a593Smuzhiyun 	for (; state > IEEE80211_STA_NOTEXIST; state--)
601*4882a593Smuzhiyun 		WARN_ON(drv_sta_state(local, sdata, sta, state, state - 1));
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 	return err;
604*4882a593Smuzhiyun }
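
/*
 * A note on the ladder walked above: station states progress
 * NOTEXIST -> NONE -> AUTH -> ASSOC -> AUTHORIZED, so for a station
 * already in IEEE80211_STA_ASSOC the driver sees three drv_sta_state()
 * transitions here, and on error the same transitions are replayed in
 * reverse to unwind.
 */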
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun static void
607*4882a593Smuzhiyun ieee80211_recalc_p2p_go_ps_allowed(struct ieee80211_sub_if_data *sdata)
608*4882a593Smuzhiyun {
609*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
610*4882a593Smuzhiyun 	bool allow_p2p_go_ps = sdata->vif.p2p;
611*4882a593Smuzhiyun 	struct sta_info *sta;
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun 	rcu_read_lock();
614*4882a593Smuzhiyun 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
615*4882a593Smuzhiyun 		if (sdata != sta->sdata ||
616*4882a593Smuzhiyun 		    !test_sta_flag(sta, WLAN_STA_ASSOC))
617*4882a593Smuzhiyun 			continue;
618*4882a593Smuzhiyun 		if (!sta->sta.support_p2p_ps) {
619*4882a593Smuzhiyun 			allow_p2p_go_ps = false;
620*4882a593Smuzhiyun 			break;
621*4882a593Smuzhiyun 		}
622*4882a593Smuzhiyun 	}
623*4882a593Smuzhiyun 	rcu_read_unlock();
624*4882a593Smuzhiyun 
625*4882a593Smuzhiyun 	if (allow_p2p_go_ps != sdata->vif.bss_conf.allow_p2p_go_ps) {
626*4882a593Smuzhiyun 		sdata->vif.bss_conf.allow_p2p_go_ps = allow_p2p_go_ps;
627*4882a593Smuzhiyun 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_P2P_PS);
628*4882a593Smuzhiyun 	}
629*4882a593Smuzhiyun }
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun /*
632*4882a593Smuzhiyun  * Should be called with sta_mtx locked;
633*4882a593Smuzhiyun  * this function replaces the mutex lock
634*4882a593Smuzhiyun  * with an RCU read lock.
635*4882a593Smuzhiyun  */
636*4882a593Smuzhiyun static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
637*4882a593Smuzhiyun {
638*4882a593Smuzhiyun 	struct ieee80211_local *local = sta->local;
639*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
640*4882a593Smuzhiyun 	struct station_info *sinfo = NULL;
641*4882a593Smuzhiyun 	int err = 0;
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 	lockdep_assert_held(&local->sta_mtx);
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun 	/* check if STA exists already */
646*4882a593Smuzhiyun 	if (sta_info_get_bss(sdata, sta->sta.addr)) {
647*4882a593Smuzhiyun 		err = -EEXIST;
648*4882a593Smuzhiyun 		goto out_cleanup;
649*4882a593Smuzhiyun 	}
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun 	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
652*4882a593Smuzhiyun 	if (!sinfo) {
653*4882a593Smuzhiyun 		err = -ENOMEM;
654*4882a593Smuzhiyun 		goto out_cleanup;
655*4882a593Smuzhiyun 	}
656*4882a593Smuzhiyun 
657*4882a593Smuzhiyun 	local->num_sta++;
658*4882a593Smuzhiyun 	local->sta_generation++;
659*4882a593Smuzhiyun 	smp_mb();
660*4882a593Smuzhiyun 
661*4882a593Smuzhiyun 	/* simplify things and don't accept BA sessions yet */
662*4882a593Smuzhiyun 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	/* make the station visible */
665*4882a593Smuzhiyun 	err = sta_info_hash_add(local, sta);
666*4882a593Smuzhiyun 	if (err)
667*4882a593Smuzhiyun 		goto out_drop_sta;
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun 	list_add_tail_rcu(&sta->list, &local->sta_list);
670*4882a593Smuzhiyun 
671*4882a593Smuzhiyun 	/* notify driver */
672*4882a593Smuzhiyun 	err = sta_info_insert_drv_state(local, sdata, sta);
673*4882a593Smuzhiyun 	if (err)
674*4882a593Smuzhiyun 		goto out_remove;
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun 	set_sta_flag(sta, WLAN_STA_INSERTED);
677*4882a593Smuzhiyun 
678*4882a593Smuzhiyun 	if (sta->sta_state >= IEEE80211_STA_ASSOC) {
679*4882a593Smuzhiyun 		ieee80211_recalc_min_chandef(sta->sdata);
680*4882a593Smuzhiyun 		if (!sta->sta.support_p2p_ps)
681*4882a593Smuzhiyun 			ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
682*4882a593Smuzhiyun 	}
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	/* accept BA sessions now */
685*4882a593Smuzhiyun 	clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun 	ieee80211_sta_debugfs_add(sta);
688*4882a593Smuzhiyun 	rate_control_add_sta_debugfs(sta);
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	sinfo->generation = local->sta_generation;
691*4882a593Smuzhiyun 	cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
692*4882a593Smuzhiyun 	kfree(sinfo);
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 	sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	/* move reference to rcu-protected */
697*4882a593Smuzhiyun 	rcu_read_lock();
698*4882a593Smuzhiyun 	mutex_unlock(&local->sta_mtx);
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	if (ieee80211_vif_is_mesh(&sdata->vif))
701*4882a593Smuzhiyun 		mesh_accept_plinks_update(sdata);
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	return 0;
704*4882a593Smuzhiyun  out_remove:
705*4882a593Smuzhiyun 	sta_info_hash_del(local, sta);
706*4882a593Smuzhiyun 	list_del_rcu(&sta->list);
707*4882a593Smuzhiyun  out_drop_sta:
708*4882a593Smuzhiyun 	local->num_sta--;
709*4882a593Smuzhiyun 	synchronize_net();
710*4882a593Smuzhiyun  out_cleanup:
711*4882a593Smuzhiyun 	cleanup_single_sta(sta);
712*4882a593Smuzhiyun 	mutex_unlock(&local->sta_mtx);
713*4882a593Smuzhiyun 	kfree(sinfo);
714*4882a593Smuzhiyun 	rcu_read_lock();
715*4882a593Smuzhiyun 	return err;
716*4882a593Smuzhiyun }
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
719*4882a593Smuzhiyun {
720*4882a593Smuzhiyun 	struct ieee80211_local *local = sta->local;
721*4882a593Smuzhiyun 	int err;
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun 	might_sleep();
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	mutex_lock(&local->sta_mtx);
726*4882a593Smuzhiyun 
727*4882a593Smuzhiyun 	err = sta_info_insert_check(sta);
728*4882a593Smuzhiyun 	if (err) {
729*4882a593Smuzhiyun 		sta_info_free(local, sta);
730*4882a593Smuzhiyun 		mutex_unlock(&local->sta_mtx);
731*4882a593Smuzhiyun 		rcu_read_lock();
732*4882a593Smuzhiyun 		return err;
733*4882a593Smuzhiyun 	}
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	return sta_info_insert_finish(sta);
736*4882a593Smuzhiyun }
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun int sta_info_insert(struct sta_info *sta)
739*4882a593Smuzhiyun {
740*4882a593Smuzhiyun 	int err = sta_info_insert_rcu(sta);
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun 	rcu_read_unlock();
743*4882a593Smuzhiyun 
744*4882a593Smuzhiyun 	return err;
745*4882a593Smuzhiyun }
746*4882a593Smuzhiyun 
747*4882a593Smuzhiyun static inline void __bss_tim_set(u8 *tim, u16 id)
748*4882a593Smuzhiyun {
749*4882a593Smuzhiyun 	/*
750*4882a593Smuzhiyun 	 * This format has been mandated by the IEEE specifications,
751*4882a593Smuzhiyun 	 * so this line may not be changed to use the __set_bit() format.
752*4882a593Smuzhiyun 	 */
753*4882a593Smuzhiyun 	tim[id / 8] |= (1 << (id % 8));
754*4882a593Smuzhiyun }
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun static inline void __bss_tim_clear(u8 *tim, u16 id)
757*4882a593Smuzhiyun {
758*4882a593Smuzhiyun 	/*
759*4882a593Smuzhiyun 	 * This format has been mandated by the IEEE specifications,
760*4882a593Smuzhiyun 	 * so this line may not be changed to use the __clear_bit() format.
761*4882a593Smuzhiyun 	 */
762*4882a593Smuzhiyun 	tim[id / 8] &= ~(1 << (id % 8));
763*4882a593Smuzhiyun }
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun static inline bool __bss_tim_get(u8 *tim, u16 id)
766*4882a593Smuzhiyun {
767*4882a593Smuzhiyun 	/*
768*4882a593Smuzhiyun 	 * This format has been mandated by the IEEE specifications,
769*4882a593Smuzhiyun 	 * so this line may not be changed to use the test_bit() format.
770*4882a593Smuzhiyun 	 */
771*4882a593Smuzhiyun 	return tim[id / 8] & (1 << (id % 8));
772*4882a593Smuzhiyun }
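
/*
 * Example: for AID 13 the helpers above address tim[13 / 8] == tim[1],
 * bit 13 % 8 == 5 (mask 0x20), i.e. the octet/bit layout of the partial
 * virtual bitmap mandated for the IEEE 802.11 TIM element.
 */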
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun static unsigned long ieee80211_tids_for_ac(int ac)
775*4882a593Smuzhiyun {
776*4882a593Smuzhiyun 	/* If we ever support TIDs > 7, this obviously needs to be adjusted */
777*4882a593Smuzhiyun 	switch (ac) {
778*4882a593Smuzhiyun 	case IEEE80211_AC_VO:
779*4882a593Smuzhiyun 		return BIT(6) | BIT(7);
780*4882a593Smuzhiyun 	case IEEE80211_AC_VI:
781*4882a593Smuzhiyun 		return BIT(4) | BIT(5);
782*4882a593Smuzhiyun 	case IEEE80211_AC_BE:
783*4882a593Smuzhiyun 		return BIT(0) | BIT(3);
784*4882a593Smuzhiyun 	case IEEE80211_AC_BK:
785*4882a593Smuzhiyun 		return BIT(1) | BIT(2);
786*4882a593Smuzhiyun 	default:
787*4882a593Smuzhiyun 		WARN_ON(1);
788*4882a593Smuzhiyun 		return 0;
789*4882a593Smuzhiyun 	}
790*4882a593Smuzhiyun }
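
/*
 * Example: for IEEE80211_AC_BE the mask above is BIT(0) | BIT(3) == 0x09,
 * i.e. TIDs 0 and 3, matching the 802.1D UP-to-AC mapping mac80211 uses
 * elsewhere.
 */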
791*4882a593Smuzhiyun 
792*4882a593Smuzhiyun static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
793*4882a593Smuzhiyun {
794*4882a593Smuzhiyun 	struct ieee80211_local *local = sta->local;
795*4882a593Smuzhiyun 	struct ps_data *ps;
796*4882a593Smuzhiyun 	bool indicate_tim = false;
797*4882a593Smuzhiyun 	u8 ignore_for_tim = sta->sta.uapsd_queues;
798*4882a593Smuzhiyun 	int ac;
799*4882a593Smuzhiyun 	u16 id = sta->sta.aid;
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun 	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
802*4882a593Smuzhiyun 	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
803*4882a593Smuzhiyun 		if (WARN_ON_ONCE(!sta->sdata->bss))
804*4882a593Smuzhiyun 			return;
805*4882a593Smuzhiyun 
806*4882a593Smuzhiyun 		ps = &sta->sdata->bss->ps;
807*4882a593Smuzhiyun #ifdef CONFIG_MAC80211_MESH
808*4882a593Smuzhiyun 	} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
809*4882a593Smuzhiyun 		ps = &sta->sdata->u.mesh.ps;
810*4882a593Smuzhiyun #endif
811*4882a593Smuzhiyun 	} else {
812*4882a593Smuzhiyun 		return;
813*4882a593Smuzhiyun 	}
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun 	/* No need to do anything if the driver does all */
816*4882a593Smuzhiyun 	if (ieee80211_hw_check(&local->hw, AP_LINK_PS) && !local->ops->set_tim)
817*4882a593Smuzhiyun 		return;
818*4882a593Smuzhiyun 
819*4882a593Smuzhiyun 	if (sta->dead)
820*4882a593Smuzhiyun 		goto done;
821*4882a593Smuzhiyun 
822*4882a593Smuzhiyun 	/*
823*4882a593Smuzhiyun 	 * If all ACs are delivery-enabled then we should build
824*4882a593Smuzhiyun 	 * the TIM bit for all ACs anyway; if only some are then
825*4882a593Smuzhiyun 	 * we ignore those and build the TIM bit using only the
826*4882a593Smuzhiyun 	 * non-enabled ones.
827*4882a593Smuzhiyun 	 */
828*4882a593Smuzhiyun 	if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
829*4882a593Smuzhiyun 		ignore_for_tim = 0;
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun 	if (ignore_pending)
832*4882a593Smuzhiyun 		ignore_for_tim = BIT(IEEE80211_NUM_ACS) - 1;
833*4882a593Smuzhiyun 
834*4882a593Smuzhiyun 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
835*4882a593Smuzhiyun 		unsigned long tids;
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 		if (ignore_for_tim & ieee80211_ac_to_qos_mask[ac])
838*4882a593Smuzhiyun 			continue;
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 		indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
841*4882a593Smuzhiyun 				!skb_queue_empty(&sta->ps_tx_buf[ac]);
842*4882a593Smuzhiyun 		if (indicate_tim)
843*4882a593Smuzhiyun 			break;
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun 		tids = ieee80211_tids_for_ac(ac);
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 		indicate_tim |=
848*4882a593Smuzhiyun 			sta->driver_buffered_tids & tids;
849*4882a593Smuzhiyun 		indicate_tim |=
850*4882a593Smuzhiyun 			sta->txq_buffered_tids & tids;
851*4882a593Smuzhiyun 	}
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun  done:
854*4882a593Smuzhiyun 	spin_lock_bh(&local->tim_lock);
855*4882a593Smuzhiyun 
856*4882a593Smuzhiyun 	if (indicate_tim == __bss_tim_get(ps->tim, id))
857*4882a593Smuzhiyun 		goto out_unlock;
858*4882a593Smuzhiyun 
859*4882a593Smuzhiyun 	if (indicate_tim)
860*4882a593Smuzhiyun 		__bss_tim_set(ps->tim, id);
861*4882a593Smuzhiyun 	else
862*4882a593Smuzhiyun 		__bss_tim_clear(ps->tim, id);
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 	if (local->ops->set_tim && !WARN_ON(sta->dead)) {
865*4882a593Smuzhiyun 		local->tim_in_locked_section = true;
866*4882a593Smuzhiyun 		drv_set_tim(local, &sta->sta, indicate_tim);
867*4882a593Smuzhiyun 		local->tim_in_locked_section = false;
868*4882a593Smuzhiyun 	}
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun out_unlock:
871*4882a593Smuzhiyun 	spin_unlock_bh(&local->tim_lock);
872*4882a593Smuzhiyun }
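
/*
 * Example: if all four ACs are U-APSD delivery-enabled
 * (sta->sta.uapsd_queues == BIT(IEEE80211_NUM_ACS) - 1), ignore_for_tim is
 * reset to 0 above, so buffered frames on any AC still set the TIM bit.
 */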
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun void sta_info_recalc_tim(struct sta_info *sta)
875*4882a593Smuzhiyun {
876*4882a593Smuzhiyun 	__sta_info_recalc_tim(sta, false);
877*4882a593Smuzhiyun }
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
880*4882a593Smuzhiyun {
881*4882a593Smuzhiyun 	struct ieee80211_tx_info *info;
882*4882a593Smuzhiyun 	int timeout;
883*4882a593Smuzhiyun 
884*4882a593Smuzhiyun 	if (!skb)
885*4882a593Smuzhiyun 		return false;
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun 	info = IEEE80211_SKB_CB(skb);
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun 	/* Timeout: (2 * listen_interval * beacon_int * 1024 / 1000000) sec */
890*4882a593Smuzhiyun 	timeout = (sta->listen_interval *
891*4882a593Smuzhiyun 		   sta->sdata->vif.bss_conf.beacon_int *
892*4882a593Smuzhiyun 		   32 / 15625) * HZ;
893*4882a593Smuzhiyun 	if (timeout < STA_TX_BUFFER_EXPIRE)
894*4882a593Smuzhiyun 		timeout = STA_TX_BUFFER_EXPIRE;
895*4882a593Smuzhiyun 	return time_after(jiffies, info->control.jiffies + timeout);
896*4882a593Smuzhiyun }
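
/*
 * Example: with listen_interval == 10 and beacon_int == 100 TU, the
 * expression above yields 10 * 100 * 32 / 15625 == 2 seconds (the reduced
 * form of 2 * 10 * 100 * 1024 / 1000000 ~= 2.048 s), which is then scaled
 * by HZ and clamped to at least STA_TX_BUFFER_EXPIRE.
 */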
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
900*4882a593Smuzhiyun 						struct sta_info *sta, int ac)
901*4882a593Smuzhiyun {
902*4882a593Smuzhiyun 	unsigned long flags;
903*4882a593Smuzhiyun 	struct sk_buff *skb;
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun 	/*
906*4882a593Smuzhiyun 	 * First check for frames that should expire on the filtered
907*4882a593Smuzhiyun 	 * queue. Frames here were rejected by the driver and are on
908*4882a593Smuzhiyun 	 * a separate queue to avoid reordering with normal PS-buffered
909*4882a593Smuzhiyun 	 * frames. They also aren't accounted for right now in the
910*4882a593Smuzhiyun 	 * total_ps_buffered counter.
911*4882a593Smuzhiyun 	 */
912*4882a593Smuzhiyun 	for (;;) {
913*4882a593Smuzhiyun 		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
914*4882a593Smuzhiyun 		skb = skb_peek(&sta->tx_filtered[ac]);
915*4882a593Smuzhiyun 		if (sta_info_buffer_expired(sta, skb))
916*4882a593Smuzhiyun 			skb = __skb_dequeue(&sta->tx_filtered[ac]);
917*4882a593Smuzhiyun 		else
918*4882a593Smuzhiyun 			skb = NULL;
919*4882a593Smuzhiyun 		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
920*4882a593Smuzhiyun 
921*4882a593Smuzhiyun 		/*
922*4882a593Smuzhiyun 		 * Frames are queued in order, so if this one
923*4882a593Smuzhiyun 		 * hasn't expired yet we can stop testing. If
924*4882a593Smuzhiyun 		 * we actually reached the end of the queue we
925*4882a593Smuzhiyun 		 * also need to stop, of course.
926*4882a593Smuzhiyun 		 */
927*4882a593Smuzhiyun 		if (!skb)
928*4882a593Smuzhiyun 			break;
929*4882a593Smuzhiyun 		ieee80211_free_txskb(&local->hw, skb);
930*4882a593Smuzhiyun 	}
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	/*
933*4882a593Smuzhiyun 	 * Now also check the normal PS-buffered queue, this will
934*4882a593Smuzhiyun 	 * only find something if the filtered queue was emptied
935*4882a593Smuzhiyun 	 * since the filtered frames are all before the normal PS
936*4882a593Smuzhiyun 	 * buffered frames.
937*4882a593Smuzhiyun 	 */
938*4882a593Smuzhiyun 	for (;;) {
939*4882a593Smuzhiyun 		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
940*4882a593Smuzhiyun 		skb = skb_peek(&sta->ps_tx_buf[ac]);
941*4882a593Smuzhiyun 		if (sta_info_buffer_expired(sta, skb))
942*4882a593Smuzhiyun 			skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
943*4882a593Smuzhiyun 		else
944*4882a593Smuzhiyun 			skb = NULL;
945*4882a593Smuzhiyun 		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 		/*
948*4882a593Smuzhiyun 		 * frames are queued in order, so if this one
949*4882a593Smuzhiyun 		 * hasn't expired yet (or we reached the end of
950*4882a593Smuzhiyun 		 * the queue) we can stop testing
951*4882a593Smuzhiyun 		 */
952*4882a593Smuzhiyun 		if (!skb)
953*4882a593Smuzhiyun 			break;
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 		local->total_ps_buffered--;
956*4882a593Smuzhiyun 		ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
957*4882a593Smuzhiyun 		       sta->sta.addr);
958*4882a593Smuzhiyun 		ieee80211_free_txskb(&local->hw, skb);
959*4882a593Smuzhiyun 	}
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun 	/*
962*4882a593Smuzhiyun 	 * Finally, recalculate the TIM bit for this station -- it might
963*4882a593Smuzhiyun 	 * now be clear because the station was too slow to retrieve its
964*4882a593Smuzhiyun 	 * frames.
965*4882a593Smuzhiyun 	 */
966*4882a593Smuzhiyun 	sta_info_recalc_tim(sta);
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	/*
969*4882a593Smuzhiyun 	 * Return whether there are any frames still buffered, this is
970*4882a593Smuzhiyun 	 * used to check whether the cleanup timer still needs to run,
971*4882a593Smuzhiyun 	 * if there are no frames we don't need to rearm the timer.
972*4882a593Smuzhiyun 	 */
973*4882a593Smuzhiyun 	return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
974*4882a593Smuzhiyun 		 skb_queue_empty(&sta->tx_filtered[ac]));
975*4882a593Smuzhiyun }
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
978*4882a593Smuzhiyun 					     struct sta_info *sta)
979*4882a593Smuzhiyun {
980*4882a593Smuzhiyun 	bool have_buffered = false;
981*4882a593Smuzhiyun 	int ac;
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	/* This is only necessary for stations on BSS/MBSS interfaces */
984*4882a593Smuzhiyun 	if (!sta->sdata->bss &&
985*4882a593Smuzhiyun 	    !ieee80211_vif_is_mesh(&sta->sdata->vif))
986*4882a593Smuzhiyun 		return false;
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
989*4882a593Smuzhiyun 		have_buffered |=
990*4882a593Smuzhiyun 			sta_info_cleanup_expire_buffered_ac(local, sta, ac);
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun 	return have_buffered;
993*4882a593Smuzhiyun }
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
996*4882a593Smuzhiyun {
997*4882a593Smuzhiyun 	struct ieee80211_local *local;
998*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata;
999*4882a593Smuzhiyun 	int ret;
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun 	might_sleep();
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	if (!sta)
1004*4882a593Smuzhiyun 		return -ENOENT;
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	local = sta->local;
1007*4882a593Smuzhiyun 	sdata = sta->sdata;
1008*4882a593Smuzhiyun 
1009*4882a593Smuzhiyun 	lockdep_assert_held(&local->sta_mtx);
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	/*
1012*4882a593Smuzhiyun 	 * Before removing the station from the driver and
1013*4882a593Smuzhiyun 	 * rate control, it might still start new aggregation
1014*4882a593Smuzhiyun 	 * sessions -- block that to make sure the tear-down
1015*4882a593Smuzhiyun 	 * will be sufficient.
1016*4882a593Smuzhiyun 	 */
1017*4882a593Smuzhiyun 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
1018*4882a593Smuzhiyun 	ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_DESTROY_STA);
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun 	/*
1021*4882a593Smuzhiyun 	 * Before removing the station from the driver there might be pending
1022*4882a593Smuzhiyun 	 * rx frames on RSS queues sent prior to the disassociation - wait for
1023*4882a593Smuzhiyun 	 * all such frames to be processed.
1024*4882a593Smuzhiyun 	 */
1025*4882a593Smuzhiyun 	drv_sync_rx_queues(local, sta);
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	ret = sta_info_hash_del(local, sta);
1028*4882a593Smuzhiyun 	if (WARN_ON(ret))
1029*4882a593Smuzhiyun 		return ret;
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 	/*
1032*4882a593Smuzhiyun 	 * for TDLS peers, make sure to return to the base channel before
1033*4882a593Smuzhiyun 	 * removal.
1034*4882a593Smuzhiyun 	 */
1035*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL)) {
1036*4882a593Smuzhiyun 		drv_tdls_cancel_channel_switch(local, sdata, &sta->sta);
1037*4882a593Smuzhiyun 		clear_sta_flag(sta, WLAN_STA_TDLS_OFF_CHANNEL);
1038*4882a593Smuzhiyun 	}
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	list_del_rcu(&sta->list);
1041*4882a593Smuzhiyun 	sta->removed = true;
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	drv_sta_pre_rcu_remove(local, sta->sdata, sta);
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1046*4882a593Smuzhiyun 	    rcu_access_pointer(sdata->u.vlan.sta) == sta)
1047*4882a593Smuzhiyun 		RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	return 0;
1050*4882a593Smuzhiyun }
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun static void __sta_info_destroy_part2(struct sta_info *sta)
1053*4882a593Smuzhiyun {
1054*4882a593Smuzhiyun 	struct ieee80211_local *local = sta->local;
1055*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
1056*4882a593Smuzhiyun 	struct station_info *sinfo;
1057*4882a593Smuzhiyun 	int ret;
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	/*
1060*4882a593Smuzhiyun 	 * NOTE: This assumes at least synchronize_net() was done
1061*4882a593Smuzhiyun 	 *	 after _part1 and before _part2!
1062*4882a593Smuzhiyun 	 */
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	might_sleep();
1065*4882a593Smuzhiyun 	lockdep_assert_held(&local->sta_mtx);
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
1068*4882a593Smuzhiyun 		ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
1069*4882a593Smuzhiyun 		WARN_ON_ONCE(ret);
1070*4882a593Smuzhiyun 	}
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	/* now keys can no longer be reached */
1073*4882a593Smuzhiyun 	ieee80211_free_sta_keys(local, sta);
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	/* disable TIM bit - last chance to tell driver */
1076*4882a593Smuzhiyun 	__sta_info_recalc_tim(sta, true);
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	sta->dead = true;
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	local->num_sta--;
1081*4882a593Smuzhiyun 	local->sta_generation++;
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	while (sta->sta_state > IEEE80211_STA_NONE) {
1084*4882a593Smuzhiyun 		ret = sta_info_move_state(sta, sta->sta_state - 1);
1085*4882a593Smuzhiyun 		if (ret) {
1086*4882a593Smuzhiyun 			WARN_ON_ONCE(1);
1087*4882a593Smuzhiyun 			break;
1088*4882a593Smuzhiyun 		}
1089*4882a593Smuzhiyun 	}
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	if (sta->uploaded) {
1092*4882a593Smuzhiyun 		ret = drv_sta_state(local, sdata, sta, IEEE80211_STA_NONE,
1093*4882a593Smuzhiyun 				    IEEE80211_STA_NOTEXIST);
1094*4882a593Smuzhiyun 		WARN_ON_ONCE(ret != 0);
1095*4882a593Smuzhiyun 	}
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
1100*4882a593Smuzhiyun 	if (sinfo)
1101*4882a593Smuzhiyun 		sta_set_sinfo(sta, sinfo, true);
1102*4882a593Smuzhiyun 	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
1103*4882a593Smuzhiyun 	kfree(sinfo);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 	ieee80211_sta_debugfs_remove(sta);
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 	ieee80211_destroy_frag_cache(&sta->frags);
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	cleanup_single_sta(sta);
1110*4882a593Smuzhiyun }
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun int __must_check __sta_info_destroy(struct sta_info *sta)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun 	int err = __sta_info_destroy_part1(sta);
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	if (err)
1117*4882a593Smuzhiyun 		return err;
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	synchronize_net();
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	__sta_info_destroy_part2(sta);
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	return 0;
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
1127*4882a593Smuzhiyun {
1128*4882a593Smuzhiyun 	struct sta_info *sta;
1129*4882a593Smuzhiyun 	int ret;
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	mutex_lock(&sdata->local->sta_mtx);
1132*4882a593Smuzhiyun 	sta = sta_info_get(sdata, addr);
1133*4882a593Smuzhiyun 	ret = __sta_info_destroy(sta);
1134*4882a593Smuzhiyun 	mutex_unlock(&sdata->local->sta_mtx);
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	return ret;
1137*4882a593Smuzhiyun }
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
1140*4882a593Smuzhiyun 			      const u8 *addr)
1141*4882a593Smuzhiyun {
1142*4882a593Smuzhiyun 	struct sta_info *sta;
1143*4882a593Smuzhiyun 	int ret;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	mutex_lock(&sdata->local->sta_mtx);
1146*4882a593Smuzhiyun 	sta = sta_info_get_bss(sdata, addr);
1147*4882a593Smuzhiyun 	ret = __sta_info_destroy(sta);
1148*4882a593Smuzhiyun 	mutex_unlock(&sdata->local->sta_mtx);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	return ret;
1151*4882a593Smuzhiyun }
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun static void sta_info_cleanup(struct timer_list *t)
1154*4882a593Smuzhiyun {
1155*4882a593Smuzhiyun 	struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
1156*4882a593Smuzhiyun 	struct sta_info *sta;
1157*4882a593Smuzhiyun 	bool timer_needed = false;
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 	rcu_read_lock();
1160*4882a593Smuzhiyun 	list_for_each_entry_rcu(sta, &local->sta_list, list)
1161*4882a593Smuzhiyun 		if (sta_info_cleanup_expire_buffered(local, sta))
1162*4882a593Smuzhiyun 			timer_needed = true;
1163*4882a593Smuzhiyun 	rcu_read_unlock();
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	if (local->quiescing)
1166*4882a593Smuzhiyun 		return;
1167*4882a593Smuzhiyun 
1168*4882a593Smuzhiyun 	if (!timer_needed)
1169*4882a593Smuzhiyun 		return;
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	mod_timer(&local->sta_cleanup,
1172*4882a593Smuzhiyun 		  round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun int sta_info_init(struct ieee80211_local *local)
1176*4882a593Smuzhiyun {
1177*4882a593Smuzhiyun 	int err;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	err = rhltable_init(&local->sta_hash, &sta_rht_params);
1180*4882a593Smuzhiyun 	if (err)
1181*4882a593Smuzhiyun 		return err;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	spin_lock_init(&local->tim_lock);
1184*4882a593Smuzhiyun 	mutex_init(&local->sta_mtx);
1185*4882a593Smuzhiyun 	INIT_LIST_HEAD(&local->sta_list);
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
1188*4882a593Smuzhiyun 	return 0;
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun void sta_info_stop(struct ieee80211_local *local)
1192*4882a593Smuzhiyun {
1193*4882a593Smuzhiyun 	del_timer_sync(&local->sta_cleanup);
1194*4882a593Smuzhiyun 	rhltable_destroy(&local->sta_hash);
1195*4882a593Smuzhiyun }
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
1201*4882a593Smuzhiyun 	struct sta_info *sta, *tmp;
1202*4882a593Smuzhiyun 	LIST_HEAD(free_list);
1203*4882a593Smuzhiyun 	int ret = 0;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	might_sleep();
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	WARN_ON(vlans && sdata->vif.type != NL80211_IFTYPE_AP);
1208*4882a593Smuzhiyun 	WARN_ON(vlans && !sdata->bss);
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	mutex_lock(&local->sta_mtx);
1211*4882a593Smuzhiyun 	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
1212*4882a593Smuzhiyun 		if (sdata == sta->sdata ||
1213*4882a593Smuzhiyun 		    (vlans && sdata->bss == sta->sdata->bss)) {
1214*4882a593Smuzhiyun 			if (!WARN_ON(__sta_info_destroy_part1(sta)))
1215*4882a593Smuzhiyun 				list_add(&sta->free_list, &free_list);
1216*4882a593Smuzhiyun 			ret++;
1217*4882a593Smuzhiyun 		}
1218*4882a593Smuzhiyun 	}
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	if (!list_empty(&free_list)) {
1221*4882a593Smuzhiyun 		synchronize_net();
1222*4882a593Smuzhiyun 		list_for_each_entry_safe(sta, tmp, &free_list, free_list)
1223*4882a593Smuzhiyun 			__sta_info_destroy_part2(sta);
1224*4882a593Smuzhiyun 	}
1225*4882a593Smuzhiyun 	mutex_unlock(&local->sta_mtx);
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	return ret;
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
1231*4882a593Smuzhiyun 			  unsigned long exp_time)
1232*4882a593Smuzhiyun {
1233*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
1234*4882a593Smuzhiyun 	struct sta_info *sta, *tmp;
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	mutex_lock(&local->sta_mtx);
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 	list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
1239*4882a593Smuzhiyun 		unsigned long last_active = ieee80211_sta_last_active(sta);
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 		if (sdata != sta->sdata)
1242*4882a593Smuzhiyun 			continue;
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 		if (time_is_before_jiffies(last_active + exp_time)) {
1245*4882a593Smuzhiyun 			sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
1246*4882a593Smuzhiyun 				sta->sta.addr);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 			if (ieee80211_vif_is_mesh(&sdata->vif) &&
1249*4882a593Smuzhiyun 			    test_sta_flag(sta, WLAN_STA_PS_STA))
1250*4882a593Smuzhiyun 				atomic_dec(&sdata->u.mesh.ps.num_sta_ps);
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 			WARN_ON(__sta_info_destroy(sta));
1253*4882a593Smuzhiyun 		}
1254*4882a593Smuzhiyun 	}
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	mutex_unlock(&local->sta_mtx);
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
1260*4882a593Smuzhiyun 						   const u8 *addr,
1261*4882a593Smuzhiyun 						   const u8 *localaddr)
1262*4882a593Smuzhiyun {
1263*4882a593Smuzhiyun 	struct ieee80211_local *local = hw_to_local(hw);
1264*4882a593Smuzhiyun 	struct rhlist_head *tmp;
1265*4882a593Smuzhiyun 	struct sta_info *sta;
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	/*
1268*4882a593Smuzhiyun 	 * Just return a random station if localaddr is NULL
1269*4882a593Smuzhiyun 	 * ... first in list.
1270*4882a593Smuzhiyun 	 */
1271*4882a593Smuzhiyun 	for_each_sta_info(local, addr, sta, tmp) {
1272*4882a593Smuzhiyun 		if (localaddr &&
1273*4882a593Smuzhiyun 		    !ether_addr_equal(sta->sdata->vif.addr, localaddr))
1274*4882a593Smuzhiyun 			continue;
1275*4882a593Smuzhiyun 		if (!sta->uploaded)
1276*4882a593Smuzhiyun 			return NULL;
1277*4882a593Smuzhiyun 		return &sta->sta;
1278*4882a593Smuzhiyun 	}
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	return NULL;
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_ifaddr);
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
1285*4882a593Smuzhiyun 					 const u8 *addr)
1286*4882a593Smuzhiyun {
1287*4882a593Smuzhiyun 	struct sta_info *sta;
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	if (!vif)
1290*4882a593Smuzhiyun 		return NULL;
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	sta = sta_info_get_bss(vif_to_sdata(vif), addr);
1293*4882a593Smuzhiyun 	if (!sta)
1294*4882a593Smuzhiyun 		return NULL;
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	if (!sta->uploaded)
1297*4882a593Smuzhiyun 		return NULL;
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	return &sta->sta;
1300*4882a593Smuzhiyun }
1301*4882a593Smuzhiyun EXPORT_SYMBOL(ieee80211_find_sta);
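/*
 * Both lookup helpers above return pointers that stay valid only for as
 * long as the caller holds the RCU read lock (or otherwise pins the
 * station). A minimal, hypothetical caller sketch -- the function name
 * and what is done with the station are illustrative only:
 */
static void example_with_sta(struct ieee80211_vif *vif, const u8 *addr)
{
	struct ieee80211_sta *pubsta;

	rcu_read_lock();
	pubsta = ieee80211_find_sta(vif, addr);
	if (pubsta) {
		/* pubsta may only be dereferenced inside this RCU section */
	}
	rcu_read_unlock();
}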
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun /* powersave support code */
1304*4882a593Smuzhiyun void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
1305*4882a593Smuzhiyun {
1306*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
1307*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
1308*4882a593Smuzhiyun 	struct sk_buff_head pending;
1309*4882a593Smuzhiyun 	int filtered = 0, buffered = 0, ac, i;
1310*4882a593Smuzhiyun 	unsigned long flags;
1311*4882a593Smuzhiyun 	struct ps_data *ps;
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1314*4882a593Smuzhiyun 		sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
1315*4882a593Smuzhiyun 				     u.ap);
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	if (sdata->vif.type == NL80211_IFTYPE_AP)
1318*4882a593Smuzhiyun 		ps = &sdata->bss->ps;
1319*4882a593Smuzhiyun 	else if (ieee80211_vif_is_mesh(&sdata->vif))
1320*4882a593Smuzhiyun 		ps = &sdata->u.mesh.ps;
1321*4882a593Smuzhiyun 	else
1322*4882a593Smuzhiyun 		return;
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	clear_sta_flag(sta, WLAN_STA_SP);
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
1327*4882a593Smuzhiyun 	sta->driver_buffered_tids = 0;
1328*4882a593Smuzhiyun 	sta->txq_buffered_tids = 0;
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1331*4882a593Smuzhiyun 		drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
1334*4882a593Smuzhiyun 		if (!sta->sta.txq[i] || !txq_has_queue(sta->sta.txq[i]))
1335*4882a593Smuzhiyun 			continue;
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 		schedule_and_wake_txq(local, to_txq_info(sta->sta.txq[i]));
1338*4882a593Smuzhiyun 	}
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	skb_queue_head_init(&pending);
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	/* sync with ieee80211_tx_h_unicast_ps_buf */
1343*4882a593Smuzhiyun 	spin_lock(&sta->ps_lock);
1344*4882a593Smuzhiyun 	/* Send all buffered frames to the station */
1345*4882a593Smuzhiyun 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1346*4882a593Smuzhiyun 		int count = skb_queue_len(&pending), tmp;
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 		spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
1349*4882a593Smuzhiyun 		skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
1350*4882a593Smuzhiyun 		spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
1351*4882a593Smuzhiyun 		tmp = skb_queue_len(&pending);
1352*4882a593Smuzhiyun 		filtered += tmp - count;
1353*4882a593Smuzhiyun 		count = tmp;
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 		spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
1356*4882a593Smuzhiyun 		skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
1357*4882a593Smuzhiyun 		spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
1358*4882a593Smuzhiyun 		tmp = skb_queue_len(&pending);
1359*4882a593Smuzhiyun 		buffered += tmp - count;
1360*4882a593Smuzhiyun 	}
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	ieee80211_add_pending_skbs(local, &pending);
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	/* now we're no longer in the deliver code */
1365*4882a593Smuzhiyun 	clear_sta_flag(sta, WLAN_STA_PS_DELIVER);
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	/* The station might have polled and then woken up before we responded,
1368*4882a593Smuzhiyun 	 * so clear these flags now to avoid them sticking around.
1369*4882a593Smuzhiyun 	 */
1370*4882a593Smuzhiyun 	clear_sta_flag(sta, WLAN_STA_PSPOLL);
1371*4882a593Smuzhiyun 	clear_sta_flag(sta, WLAN_STA_UAPSD);
1372*4882a593Smuzhiyun 	spin_unlock(&sta->ps_lock);
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	atomic_dec(&ps->num_sta_ps);
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	local->total_ps_buffered -= buffered;
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	sta_info_recalc_tim(sta);
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	ps_dbg(sdata,
1381*4882a593Smuzhiyun 	       "STA %pM aid %d sending %d filtered/%d PS frames since STA woke up\n",
1382*4882a593Smuzhiyun 	       sta->sta.addr, sta->sta.aid, filtered, buffered);
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	ieee80211_check_fast_xmit(sta);
1385*4882a593Smuzhiyun }
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun static void ieee80211_send_null_response(struct sta_info *sta, int tid,
1388*4882a593Smuzhiyun 					 enum ieee80211_frame_release_type reason,
1389*4882a593Smuzhiyun 					 bool call_driver, bool more_data)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
1392*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
1393*4882a593Smuzhiyun 	struct ieee80211_qos_hdr *nullfunc;
1394*4882a593Smuzhiyun 	struct sk_buff *skb;
1395*4882a593Smuzhiyun 	int size = sizeof(*nullfunc);
1396*4882a593Smuzhiyun 	__le16 fc;
1397*4882a593Smuzhiyun 	bool qos = sta->sta.wme;
1398*4882a593Smuzhiyun 	struct ieee80211_tx_info *info;
1399*4882a593Smuzhiyun 	struct ieee80211_chanctx_conf *chanctx_conf;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	if (qos) {
1402*4882a593Smuzhiyun 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
1403*4882a593Smuzhiyun 				 IEEE80211_STYPE_QOS_NULLFUNC |
1404*4882a593Smuzhiyun 				 IEEE80211_FCTL_FROMDS);
1405*4882a593Smuzhiyun 	} else {
1406*4882a593Smuzhiyun 		size -= 2;
1407*4882a593Smuzhiyun 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
1408*4882a593Smuzhiyun 				 IEEE80211_STYPE_NULLFUNC |
1409*4882a593Smuzhiyun 				 IEEE80211_FCTL_FROMDS);
1410*4882a593Smuzhiyun 	}
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
1413*4882a593Smuzhiyun 	if (!skb)
1414*4882a593Smuzhiyun 		return;
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	skb_reserve(skb, local->hw.extra_tx_headroom);
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	nullfunc = skb_put(skb, size);
1419*4882a593Smuzhiyun 	nullfunc->frame_control = fc;
1420*4882a593Smuzhiyun 	nullfunc->duration_id = 0;
1421*4882a593Smuzhiyun 	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
1422*4882a593Smuzhiyun 	memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
1423*4882a593Smuzhiyun 	memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
1424*4882a593Smuzhiyun 	nullfunc->seq_ctrl = 0;
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	skb->priority = tid;
1427*4882a593Smuzhiyun 	skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
1428*4882a593Smuzhiyun 	if (qos) {
1429*4882a593Smuzhiyun 		nullfunc->qos_ctrl = cpu_to_le16(tid);
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 		if (reason == IEEE80211_FRAME_RELEASE_UAPSD) {
1432*4882a593Smuzhiyun 			nullfunc->qos_ctrl |=
1433*4882a593Smuzhiyun 				cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
1434*4882a593Smuzhiyun 			if (more_data)
1435*4882a593Smuzhiyun 				nullfunc->frame_control |=
1436*4882a593Smuzhiyun 					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1437*4882a593Smuzhiyun 		}
1438*4882a593Smuzhiyun 	}
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	info = IEEE80211_SKB_CB(skb);
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	/*
1443*4882a593Smuzhiyun 	 * Tell TX path to send this frame even though the
1444*4882a593Smuzhiyun 	 * STA may still remain in PS mode after this frame
1445*4882a593Smuzhiyun 	 * exchange. Also set EOSP to indicate this packet
1446*4882a593Smuzhiyun 	 * ends the poll/service period.
1447*4882a593Smuzhiyun 	 */
1448*4882a593Smuzhiyun 	info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
1449*4882a593Smuzhiyun 		       IEEE80211_TX_STATUS_EOSP |
1450*4882a593Smuzhiyun 		       IEEE80211_TX_CTL_REQ_TX_STATUS;
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	if (call_driver)
1455*4882a593Smuzhiyun 		drv_allow_buffered_frames(local, sta, BIT(tid), 1,
1456*4882a593Smuzhiyun 					  reason, false);
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	skb->dev = sdata->dev;
1459*4882a593Smuzhiyun 
1460*4882a593Smuzhiyun 	rcu_read_lock();
1461*4882a593Smuzhiyun 	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1462*4882a593Smuzhiyun 	if (WARN_ON(!chanctx_conf)) {
1463*4882a593Smuzhiyun 		rcu_read_unlock();
1464*4882a593Smuzhiyun 		kfree_skb(skb);
1465*4882a593Smuzhiyun 		return;
1466*4882a593Smuzhiyun 	}
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	info->band = chanctx_conf->def.chan->band;
1469*4882a593Smuzhiyun 	ieee80211_xmit(sdata, sta, skb);
1470*4882a593Smuzhiyun 	rcu_read_unlock();
1471*4882a593Smuzhiyun }
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun static int find_highest_prio_tid(unsigned long tids)
1474*4882a593Smuzhiyun {
1475*4882a593Smuzhiyun 	/* lower 3 TIDs aren't ordered perfectly */
1476*4882a593Smuzhiyun 	if (tids & 0xF8)
1477*4882a593Smuzhiyun 		return fls(tids) - 1;
1478*4882a593Smuzhiyun 	/* TID 0 is BE just like TID 3 */
1479*4882a593Smuzhiyun 	if (tids & BIT(0))
1480*4882a593Smuzhiyun 		return 0;
1481*4882a593Smuzhiyun 	return fls(tids) - 1;
1482*4882a593Smuzhiyun }
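/*
 * A few worked examples of the helper above (assuming the usual TID/AC
 * mapping where TIDs 1 and 2 are BK and TIDs 0 and 3 are BE):
 *
 *   find_highest_prio_tid(BIT(6) | BIT(1)) == 6  (0x42 & 0xF8 is non-zero)
 *   find_highest_prio_tid(BIT(2) | BIT(0)) == 0  (TID 0 is BE and beats BK)
 *   find_highest_prio_tid(BIT(2) | BIT(1)) == 2  (only BK TIDs remain)
 */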
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun /* Indicates if the MORE_DATA bit should be set in the last
1485*4882a593Smuzhiyun  * frame obtained by ieee80211_sta_ps_get_frames.
1486*4882a593Smuzhiyun  * Note that driver_release_tids is relevant only if
1487*4882a593Smuzhiyun  * reason = IEEE80211_FRAME_RELEASE_PSPOLL
1488*4882a593Smuzhiyun  */
1489*4882a593Smuzhiyun static bool
1490*4882a593Smuzhiyun ieee80211_sta_ps_more_data(struct sta_info *sta, u8 ignored_acs,
1491*4882a593Smuzhiyun 			   enum ieee80211_frame_release_type reason,
1492*4882a593Smuzhiyun 			   unsigned long driver_release_tids)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun 	int ac;
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	/* If the driver has data on more than one TID then
1497*4882a593Smuzhiyun 	 * certainly there's more data if we release just a
1498*4882a593Smuzhiyun 	 * single frame now (from a single TID). This will
1499*4882a593Smuzhiyun 	 * only happen for PS-Poll.
1500*4882a593Smuzhiyun 	 */
1501*4882a593Smuzhiyun 	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
1502*4882a593Smuzhiyun 	    hweight16(driver_release_tids) > 1)
1503*4882a593Smuzhiyun 		return true;
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1506*4882a593Smuzhiyun 		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
1507*4882a593Smuzhiyun 			continue;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
1510*4882a593Smuzhiyun 		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
1511*4882a593Smuzhiyun 			return true;
1512*4882a593Smuzhiyun 	}
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	return false;
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun static void
1518*4882a593Smuzhiyun ieee80211_sta_ps_get_frames(struct sta_info *sta, int n_frames, u8 ignored_acs,
1519*4882a593Smuzhiyun 			    enum ieee80211_frame_release_type reason,
1520*4882a593Smuzhiyun 			    struct sk_buff_head *frames,
1521*4882a593Smuzhiyun 			    unsigned long *driver_release_tids)
1522*4882a593Smuzhiyun {
1523*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
1524*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
1525*4882a593Smuzhiyun 	int ac;
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	/* Get response frame(s) and more data bit for the last one. */
1528*4882a593Smuzhiyun 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1529*4882a593Smuzhiyun 		unsigned long tids;
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 		if (ignored_acs & ieee80211_ac_to_qos_mask[ac])
1532*4882a593Smuzhiyun 			continue;
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun 		tids = ieee80211_tids_for_ac(ac);
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 		/* if we already have frames from software, then we can't also
1537*4882a593Smuzhiyun 		 * release from hardware queues
1538*4882a593Smuzhiyun 		 */
1539*4882a593Smuzhiyun 		if (skb_queue_empty(frames)) {
1540*4882a593Smuzhiyun 			*driver_release_tids |=
1541*4882a593Smuzhiyun 				sta->driver_buffered_tids & tids;
1542*4882a593Smuzhiyun 			*driver_release_tids |= sta->txq_buffered_tids & tids;
1543*4882a593Smuzhiyun 		}
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun 		if (!*driver_release_tids) {
1546*4882a593Smuzhiyun 			struct sk_buff *skb;
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 			while (n_frames > 0) {
1549*4882a593Smuzhiyun 				skb = skb_dequeue(&sta->tx_filtered[ac]);
1550*4882a593Smuzhiyun 				if (!skb) {
1551*4882a593Smuzhiyun 					skb = skb_dequeue(
1552*4882a593Smuzhiyun 						&sta->ps_tx_buf[ac]);
1553*4882a593Smuzhiyun 					if (skb)
1554*4882a593Smuzhiyun 						local->total_ps_buffered--;
1555*4882a593Smuzhiyun 				}
1556*4882a593Smuzhiyun 				if (!skb)
1557*4882a593Smuzhiyun 					break;
1558*4882a593Smuzhiyun 				n_frames--;
1559*4882a593Smuzhiyun 				__skb_queue_tail(frames, skb);
1560*4882a593Smuzhiyun 			}
1561*4882a593Smuzhiyun 		}
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun 		/* If we have more frames buffered on this AC, then abort the
1564*4882a593Smuzhiyun 		 * loop since we can't send more data from other ACs before
1565*4882a593Smuzhiyun 		 * the buffered frames from this one.
1566*4882a593Smuzhiyun 		 */
1567*4882a593Smuzhiyun 		if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
1568*4882a593Smuzhiyun 		    !skb_queue_empty(&sta->ps_tx_buf[ac]))
1569*4882a593Smuzhiyun 			break;
1570*4882a593Smuzhiyun 	}
1571*4882a593Smuzhiyun }
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun static void
1574*4882a593Smuzhiyun ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1575*4882a593Smuzhiyun 				  int n_frames, u8 ignored_acs,
1576*4882a593Smuzhiyun 				  enum ieee80211_frame_release_type reason)
1577*4882a593Smuzhiyun {
1578*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
1579*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
1580*4882a593Smuzhiyun 	unsigned long driver_release_tids = 0;
1581*4882a593Smuzhiyun 	struct sk_buff_head frames;
1582*4882a593Smuzhiyun 	bool more_data;
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	/* Service or PS-Poll period starts */
1585*4882a593Smuzhiyun 	set_sta_flag(sta, WLAN_STA_SP);
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	__skb_queue_head_init(&frames);
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	ieee80211_sta_ps_get_frames(sta, n_frames, ignored_acs, reason,
1590*4882a593Smuzhiyun 				    &frames, &driver_release_tids);
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun 	more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids);
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 	if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL)
1595*4882a593Smuzhiyun 		driver_release_tids =
1596*4882a593Smuzhiyun 			BIT(find_highest_prio_tid(driver_release_tids));
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 	if (skb_queue_empty(&frames) && !driver_release_tids) {
1599*4882a593Smuzhiyun 		int tid, ac;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 		/*
1602*4882a593Smuzhiyun 		 * For PS-Poll, this can only happen due to a race condition
1603*4882a593Smuzhiyun 		 * when we set the TIM bit and the station notices it, but
1604*4882a593Smuzhiyun 		 * before it can poll for the frame we expire it.
1605*4882a593Smuzhiyun 		 *
1606*4882a593Smuzhiyun 		 * For uAPSD, this is said in the standard (11.2.1.5 h):
1607*4882a593Smuzhiyun 		 *	At each unscheduled SP for a non-AP STA, the AP shall
1608*4882a593Smuzhiyun 		 *	attempt to transmit at least one MSDU or MMPDU, but no
1609*4882a593Smuzhiyun 		 *	more than the value specified in the Max SP Length field
1610*4882a593Smuzhiyun 		 *	in the QoS Capability element from delivery-enabled ACs,
1611*4882a593Smuzhiyun 		 *	that are destined for the non-AP STA.
1612*4882a593Smuzhiyun 		 *
1613*4882a593Smuzhiyun 		 * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
1614*4882a593Smuzhiyun 		 */
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 		/* This will evaluate to 1, 3, 5 or 7. */
1617*4882a593Smuzhiyun 		for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
1618*4882a593Smuzhiyun 			if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
1619*4882a593Smuzhiyun 				break;
1620*4882a593Smuzhiyun 		tid = 7 - 2 * ac;
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun 		ieee80211_send_null_response(sta, tid, reason, true, false);
1623*4882a593Smuzhiyun 	} else if (!driver_release_tids) {
1624*4882a593Smuzhiyun 		struct sk_buff_head pending;
1625*4882a593Smuzhiyun 		struct sk_buff *skb;
1626*4882a593Smuzhiyun 		int num = 0;
1627*4882a593Smuzhiyun 		u16 tids = 0;
1628*4882a593Smuzhiyun 		bool need_null = false;
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 		skb_queue_head_init(&pending);
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 		while ((skb = __skb_dequeue(&frames))) {
1633*4882a593Smuzhiyun 			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1634*4882a593Smuzhiyun 			struct ieee80211_hdr *hdr = (void *) skb->data;
1635*4882a593Smuzhiyun 			u8 *qoshdr = NULL;
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 			num++;
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 			/*
1640*4882a593Smuzhiyun 			 * Tell TX path to send this frame even though the
1641*4882a593Smuzhiyun 			 * STA may still remain in PS mode after this frame
1642*4882a593Smuzhiyun 			 * exchange.
1643*4882a593Smuzhiyun 			 */
1644*4882a593Smuzhiyun 			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
1645*4882a593Smuzhiyun 			info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 			/*
1648*4882a593Smuzhiyun 			 * Use MoreData flag to indicate whether there are
1649*4882a593Smuzhiyun 			 * more buffered frames for this STA
1650*4882a593Smuzhiyun 			 */
1651*4882a593Smuzhiyun 			if (more_data || !skb_queue_empty(&frames))
1652*4882a593Smuzhiyun 				hdr->frame_control |=
1653*4882a593Smuzhiyun 					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1654*4882a593Smuzhiyun 			else
1655*4882a593Smuzhiyun 				hdr->frame_control &=
1656*4882a593Smuzhiyun 					cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 			if (ieee80211_is_data_qos(hdr->frame_control) ||
1659*4882a593Smuzhiyun 			    ieee80211_is_qos_nullfunc(hdr->frame_control))
1660*4882a593Smuzhiyun 				qoshdr = ieee80211_get_qos_ctl(hdr);
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun 			tids |= BIT(skb->priority);
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 			__skb_queue_tail(&pending, skb);
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun 			/* end service period after last frame or add one */
1667*4882a593Smuzhiyun 			if (!skb_queue_empty(&frames))
1668*4882a593Smuzhiyun 				continue;
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 			if (reason != IEEE80211_FRAME_RELEASE_UAPSD) {
1671*4882a593Smuzhiyun 				/* for PS-Poll, there's only one frame */
1672*4882a593Smuzhiyun 				info->flags |= IEEE80211_TX_STATUS_EOSP |
1673*4882a593Smuzhiyun 					       IEEE80211_TX_CTL_REQ_TX_STATUS;
1674*4882a593Smuzhiyun 				break;
1675*4882a593Smuzhiyun 			}
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 			/* For uAPSD, things are a bit more complicated. If the
1678*4882a593Smuzhiyun 			 * last frame has a QoS header (i.e. is a QoS-data or
1679*4882a593Smuzhiyun 			 * QoS-nulldata frame) then just set the EOSP bit there
1680*4882a593Smuzhiyun 			 * and be done.
1681*4882a593Smuzhiyun 			 * If the frame doesn't have a QoS header (which means
1682*4882a593Smuzhiyun 			 * it should be a bufferable MMPDU) then we can't set
1683*4882a593Smuzhiyun 			 * the EOSP bit in the QoS header; add a QoS-nulldata
1684*4882a593Smuzhiyun 			 * frame to the list to send it after the MMPDU.
1685*4882a593Smuzhiyun 			 *
1686*4882a593Smuzhiyun 			 * Note that this code is only in the mac80211-release
1687*4882a593Smuzhiyun 			 * code path, we assume that the driver will not buffer
1688*4882a593Smuzhiyun 			 * anything but QoS-data frames, or if it does, will
1689*4882a593Smuzhiyun 			 * create the QoS-nulldata frame by itself if needed.
1690*4882a593Smuzhiyun 			 *
1691*4882a593Smuzhiyun 			 * Cf. 802.11-2012 10.2.1.10 (c).
1692*4882a593Smuzhiyun 			 */
1693*4882a593Smuzhiyun 			if (qoshdr) {
1694*4882a593Smuzhiyun 				*qoshdr |= IEEE80211_QOS_CTL_EOSP;
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 				info->flags |= IEEE80211_TX_STATUS_EOSP |
1697*4882a593Smuzhiyun 					       IEEE80211_TX_CTL_REQ_TX_STATUS;
1698*4882a593Smuzhiyun 			} else {
1699*4882a593Smuzhiyun 				/* The standard isn't completely clear on this
1700*4882a593Smuzhiyun 				 * as it says the more-data bit should be set
1701*4882a593Smuzhiyun 				 * if there are more BUs. The QoS-Null frame
1702*4882a593Smuzhiyun 				 * we're about to send isn't buffered yet, we
1703*4882a593Smuzhiyun 				 * only create it below, but let's pretend it
1704*4882a593Smuzhiyun 				 * was buffered just in case some clients only
1705*4882a593Smuzhiyun 				 * expect more-data=0 when eosp=1.
1706*4882a593Smuzhiyun 				 */
1707*4882a593Smuzhiyun 				hdr->frame_control |=
1708*4882a593Smuzhiyun 					cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1709*4882a593Smuzhiyun 				need_null = true;
1710*4882a593Smuzhiyun 				num++;
1711*4882a593Smuzhiyun 			}
1712*4882a593Smuzhiyun 			break;
1713*4882a593Smuzhiyun 		}
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 		drv_allow_buffered_frames(local, sta, tids, num,
1716*4882a593Smuzhiyun 					  reason, more_data);
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 		ieee80211_add_pending_skbs(local, &pending);
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun 		if (need_null)
1721*4882a593Smuzhiyun 			ieee80211_send_null_response(
1722*4882a593Smuzhiyun 				sta, find_highest_prio_tid(tids),
1723*4882a593Smuzhiyun 				reason, false, false);
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 		sta_info_recalc_tim(sta);
1726*4882a593Smuzhiyun 	} else {
1727*4882a593Smuzhiyun 		int tid;
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 		/*
1730*4882a593Smuzhiyun 		 * We need to release a frame that is buffered somewhere in the
1731*4882a593Smuzhiyun 		 * driver ... it'll have to handle that.
1732*4882a593Smuzhiyun 		 * Note that the driver also has to check the number of frames
1733*4882a593Smuzhiyun 		 * on the TIDs we're releasing from - if there are more than
1734*4882a593Smuzhiyun 		 * n_frames it has to set the more-data bit (if we didn't ask
1735*4882a593Smuzhiyun 		 * it to set it anyway due to other buffered frames); if there
1736*4882a593Smuzhiyun 		 * are fewer than n_frames it has to make sure to adjust that
1737*4882a593Smuzhiyun 		 * to allow the service period to end properly.
1738*4882a593Smuzhiyun 		 */
1739*4882a593Smuzhiyun 		drv_release_buffered_frames(local, sta, driver_release_tids,
1740*4882a593Smuzhiyun 					    n_frames, reason, more_data);
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 		/*
1743*4882a593Smuzhiyun 		 * Note that we don't recalculate the TIM bit here as it would
1744*4882a593Smuzhiyun 		 * most likely have no effect at all unless the driver told us
1745*4882a593Smuzhiyun 		 * that the TID(s) became empty before returning here from the
1746*4882a593Smuzhiyun 		 * release function.
1747*4882a593Smuzhiyun 		 * Either way, however, when the driver tells us that the TID(s)
1748*4882a593Smuzhiyun 		 * became empty or we find that a txq became empty, we'll do the
1749*4882a593Smuzhiyun 		 * TIM recalculation.
1750*4882a593Smuzhiyun 		 */
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 		if (!sta->sta.txq[0])
1753*4882a593Smuzhiyun 			return;
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 		for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1756*4882a593Smuzhiyun 			if (!sta->sta.txq[tid] ||
1757*4882a593Smuzhiyun 			    !(driver_release_tids & BIT(tid)) ||
1758*4882a593Smuzhiyun 			    txq_has_queue(sta->sta.txq[tid]))
1759*4882a593Smuzhiyun 				continue;
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 			sta_info_recalc_tim(sta);
1762*4882a593Smuzhiyun 			break;
1763*4882a593Smuzhiyun 		}
1764*4882a593Smuzhiyun 	}
1765*4882a593Smuzhiyun }
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
1768*4882a593Smuzhiyun {
1769*4882a593Smuzhiyun 	u8 ignore_for_response = sta->sta.uapsd_queues;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 	/*
1772*4882a593Smuzhiyun 	 * If all ACs are delivery-enabled then we should reply
1773*4882a593Smuzhiyun 	 * from any of them; if only some are enabled, we reply
1774*4882a593Smuzhiyun 	 * only from the non-enabled ones.
1775*4882a593Smuzhiyun 	 */
1776*4882a593Smuzhiyun 	if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
1777*4882a593Smuzhiyun 		ignore_for_response = 0;
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun 	ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
1780*4882a593Smuzhiyun 					  IEEE80211_FRAME_RELEASE_PSPOLL);
1781*4882a593Smuzhiyun }
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
1784*4882a593Smuzhiyun {
1785*4882a593Smuzhiyun 	int n_frames = sta->sta.max_sp;
1786*4882a593Smuzhiyun 	u8 delivery_enabled = sta->sta.uapsd_queues;
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun 	/*
1789*4882a593Smuzhiyun 	 * If we ever grow support for TSPEC this might happen if
1790*4882a593Smuzhiyun 	 * the TSPEC update from hostapd comes in between a trigger
1791*4882a593Smuzhiyun 	 * frame setting WLAN_STA_UAPSD in the RX path and this
1792*4882a593Smuzhiyun 	 * actually getting called.
1793*4882a593Smuzhiyun 	 */
1794*4882a593Smuzhiyun 	if (!delivery_enabled)
1795*4882a593Smuzhiyun 		return;
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	switch (sta->sta.max_sp) {
1798*4882a593Smuzhiyun 	case 1:
1799*4882a593Smuzhiyun 		n_frames = 2;
1800*4882a593Smuzhiyun 		break;
1801*4882a593Smuzhiyun 	case 2:
1802*4882a593Smuzhiyun 		n_frames = 4;
1803*4882a593Smuzhiyun 		break;
1804*4882a593Smuzhiyun 	case 3:
1805*4882a593Smuzhiyun 		n_frames = 6;
1806*4882a593Smuzhiyun 		break;
1807*4882a593Smuzhiyun 	case 0:
1808*4882a593Smuzhiyun 		/* XXX: what is a good value? */
1809*4882a593Smuzhiyun 		n_frames = 128;
1810*4882a593Smuzhiyun 		break;
1811*4882a593Smuzhiyun 	}
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
1814*4882a593Smuzhiyun 					  IEEE80211_FRAME_RELEASE_UAPSD);
1815*4882a593Smuzhiyun }
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
1818*4882a593Smuzhiyun 			       struct ieee80211_sta *pubsta, bool block)
1819*4882a593Smuzhiyun {
1820*4882a593Smuzhiyun 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	trace_api_sta_block_awake(sta->local, pubsta, block);
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	if (block) {
1825*4882a593Smuzhiyun 		set_sta_flag(sta, WLAN_STA_PS_DRIVER);
1826*4882a593Smuzhiyun 		ieee80211_clear_fast_xmit(sta);
1827*4882a593Smuzhiyun 		return;
1828*4882a593Smuzhiyun 	}
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1831*4882a593Smuzhiyun 		return;
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
1834*4882a593Smuzhiyun 		set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1835*4882a593Smuzhiyun 		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
1836*4882a593Smuzhiyun 		ieee80211_queue_work(hw, &sta->drv_deliver_wk);
1837*4882a593Smuzhiyun 	} else if (test_sta_flag(sta, WLAN_STA_PSPOLL) ||
1838*4882a593Smuzhiyun 		   test_sta_flag(sta, WLAN_STA_UAPSD)) {
1839*4882a593Smuzhiyun 		/* must be asleep in this case */
1840*4882a593Smuzhiyun 		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
1841*4882a593Smuzhiyun 		ieee80211_queue_work(hw, &sta->drv_deliver_wk);
1842*4882a593Smuzhiyun 	} else {
1843*4882a593Smuzhiyun 		clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
1844*4882a593Smuzhiyun 		ieee80211_check_fast_xmit(sta);
1845*4882a593Smuzhiyun 	}
1846*4882a593Smuzhiyun }
1847*4882a593Smuzhiyun EXPORT_SYMBOL(ieee80211_sta_block_awake);
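/*
 * A minimal driver-side sketch of the intended calling pattern (the driver
 * function and the firmware step are hypothetical): block PS delivery
 * while the driver drains its own per-station queues, then unblock so
 * mac80211 hands over anything that was buffered in the meantime.
 */
static void example_drv_drain_sta(struct ieee80211_hw *hw,
				  struct ieee80211_sta *pubsta)
{
	ieee80211_sta_block_awake(hw, pubsta, true);
	/* ... ask the (hypothetical) firmware to flush its per-STA queues ... */
	ieee80211_sta_block_awake(hw, pubsta, false);
}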
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun void ieee80211_sta_eosp(struct ieee80211_sta *pubsta)
1850*4882a593Smuzhiyun {
1851*4882a593Smuzhiyun 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1852*4882a593Smuzhiyun 	struct ieee80211_local *local = sta->local;
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 	trace_api_eosp(local, pubsta);
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 	clear_sta_flag(sta, WLAN_STA_SP);
1857*4882a593Smuzhiyun }
1858*4882a593Smuzhiyun EXPORT_SYMBOL(ieee80211_sta_eosp);
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid)
1861*4882a593Smuzhiyun {
1862*4882a593Smuzhiyun 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1863*4882a593Smuzhiyun 	enum ieee80211_frame_release_type reason;
1864*4882a593Smuzhiyun 	bool more_data;
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 	trace_api_send_eosp_nullfunc(sta->local, pubsta, tid);
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 	reason = IEEE80211_FRAME_RELEASE_UAPSD;
1869*4882a593Smuzhiyun 	more_data = ieee80211_sta_ps_more_data(sta, ~sta->sta.uapsd_queues,
1870*4882a593Smuzhiyun 					       reason, 0);
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	ieee80211_send_null_response(sta, tid, reason, false, more_data);
1873*4882a593Smuzhiyun }
1874*4882a593Smuzhiyun EXPORT_SYMBOL(ieee80211_send_eosp_nullfunc);
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
1877*4882a593Smuzhiyun 				u8 tid, bool buffered)
1878*4882a593Smuzhiyun {
1879*4882a593Smuzhiyun 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 	if (WARN_ON(tid >= IEEE80211_NUM_TIDS))
1882*4882a593Smuzhiyun 		return;
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun 	trace_api_sta_set_buffered(sta->local, pubsta, tid, buffered);
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 	if (buffered)
1887*4882a593Smuzhiyun 		set_bit(tid, &sta->driver_buffered_tids);
1888*4882a593Smuzhiyun 	else
1889*4882a593Smuzhiyun 		clear_bit(tid, &sta->driver_buffered_tids);
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	sta_info_recalc_tim(sta);
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun EXPORT_SYMBOL(ieee80211_sta_set_buffered);
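/*
 * A hedged sketch of how a driver that buffers frames internally could
 * feed this helper from its own per-TID queue accounting (the helper name
 * is illustrative); mac80211 then folds the result into the TIM
 * recalculation above.
 */
static void example_drv_report_tid_queue(struct ieee80211_sta *pubsta,
					 u8 tid, u32 queued_frames)
{
	ieee80211_sta_set_buffered(pubsta, tid, queued_frames > 0);
}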
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
1896*4882a593Smuzhiyun 				    u32 tx_airtime, u32 rx_airtime)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1899*4882a593Smuzhiyun 	struct ieee80211_local *local = sta->sdata->local;
1900*4882a593Smuzhiyun 	u8 ac = ieee80211_ac_from_tid(tid);
1901*4882a593Smuzhiyun 	u32 airtime = 0;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	if (sta->local->airtime_flags & AIRTIME_USE_TX)
1904*4882a593Smuzhiyun 		airtime += tx_airtime;
1905*4882a593Smuzhiyun 	if (sta->local->airtime_flags & AIRTIME_USE_RX)
1906*4882a593Smuzhiyun 		airtime += rx_airtime;
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	spin_lock_bh(&local->active_txq_lock[ac]);
1909*4882a593Smuzhiyun 	sta->airtime[ac].tx_airtime += tx_airtime;
1910*4882a593Smuzhiyun 	sta->airtime[ac].rx_airtime += rx_airtime;
1911*4882a593Smuzhiyun 	sta->airtime[ac].deficit -= airtime;
1912*4882a593Smuzhiyun 	spin_unlock_bh(&local->active_txq_lock[ac]);
1913*4882a593Smuzhiyun }
1914*4882a593Smuzhiyun EXPORT_SYMBOL(ieee80211_sta_register_airtime);
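/*
 * A hypothetical TX-status-path sketch: a driver that measures how long a
 * transmission occupied the medium reports it here per TID so the
 * deficit-based airtime accounting above can charge the station. Reporting
 * 0 for RX airtime in this path is an assumption of the sketch, not a
 * requirement of the API.
 */
static void example_drv_tx_status_airtime(struct ieee80211_sta *pubsta,
					   u8 tid, u32 tx_airtime)
{
	ieee80211_sta_register_airtime(pubsta, tid, tx_airtime, 0);
}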
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
1917*4882a593Smuzhiyun 					  struct sta_info *sta, u8 ac,
1918*4882a593Smuzhiyun 					  u16 tx_airtime, bool tx_completed)
1919*4882a593Smuzhiyun {
1920*4882a593Smuzhiyun 	int tx_pending;
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 	if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL))
1923*4882a593Smuzhiyun 		return;
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 	if (!tx_completed) {
1926*4882a593Smuzhiyun 		if (sta)
1927*4882a593Smuzhiyun 			atomic_add(tx_airtime,
1928*4882a593Smuzhiyun 				   &sta->airtime[ac].aql_tx_pending);
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 		atomic_add(tx_airtime, &local->aql_total_pending_airtime);
1931*4882a593Smuzhiyun 		return;
1932*4882a593Smuzhiyun 	}
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 	if (sta) {
1935*4882a593Smuzhiyun 		tx_pending = atomic_sub_return(tx_airtime,
1936*4882a593Smuzhiyun 					       &sta->airtime[ac].aql_tx_pending);
1937*4882a593Smuzhiyun 		if (tx_pending < 0)
1938*4882a593Smuzhiyun 			atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending,
1939*4882a593Smuzhiyun 				       tx_pending, 0);
1940*4882a593Smuzhiyun 	}
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun 	tx_pending = atomic_sub_return(tx_airtime,
1943*4882a593Smuzhiyun 				       &local->aql_total_pending_airtime);
1944*4882a593Smuzhiyun 	if (WARN_ONCE(tx_pending < 0,
1945*4882a593Smuzhiyun 		      "Device %s AC %d pending airtime underflow: %u, %u",
1946*4882a593Smuzhiyun 		      wiphy_name(local->hw.wiphy), ac, tx_pending,
1947*4882a593Smuzhiyun 		      tx_airtime))
1948*4882a593Smuzhiyun 		atomic_cmpxchg(&local->aql_total_pending_airtime,
1949*4882a593Smuzhiyun 			       tx_pending, 0);
1950*4882a593Smuzhiyun }
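/*
 * Note on the underflow handling above: the subtraction and the clamp are
 * not atomic as a pair, so atomic_cmpxchg() only resets the counter to 0
 * if it still holds the negative value that was just observed; if a
 * concurrent update raced in between, that update simply wins and no
 * correction is applied.
 */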
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun int sta_info_move_state(struct sta_info *sta,
1953*4882a593Smuzhiyun 			enum ieee80211_sta_state new_state)
1954*4882a593Smuzhiyun {
1955*4882a593Smuzhiyun 	might_sleep();
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 	if (sta->sta_state == new_state)
1958*4882a593Smuzhiyun 		return 0;
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun 	/* check allowed transitions first */
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 	switch (new_state) {
1963*4882a593Smuzhiyun 	case IEEE80211_STA_NONE:
1964*4882a593Smuzhiyun 		if (sta->sta_state != IEEE80211_STA_AUTH)
1965*4882a593Smuzhiyun 			return -EINVAL;
1966*4882a593Smuzhiyun 		break;
1967*4882a593Smuzhiyun 	case IEEE80211_STA_AUTH:
1968*4882a593Smuzhiyun 		if (sta->sta_state != IEEE80211_STA_NONE &&
1969*4882a593Smuzhiyun 		    sta->sta_state != IEEE80211_STA_ASSOC)
1970*4882a593Smuzhiyun 			return -EINVAL;
1971*4882a593Smuzhiyun 		break;
1972*4882a593Smuzhiyun 	case IEEE80211_STA_ASSOC:
1973*4882a593Smuzhiyun 		if (sta->sta_state != IEEE80211_STA_AUTH &&
1974*4882a593Smuzhiyun 		    sta->sta_state != IEEE80211_STA_AUTHORIZED)
1975*4882a593Smuzhiyun 			return -EINVAL;
1976*4882a593Smuzhiyun 		break;
1977*4882a593Smuzhiyun 	case IEEE80211_STA_AUTHORIZED:
1978*4882a593Smuzhiyun 		if (sta->sta_state != IEEE80211_STA_ASSOC)
1979*4882a593Smuzhiyun 			return -EINVAL;
1980*4882a593Smuzhiyun 		break;
1981*4882a593Smuzhiyun 	default:
1982*4882a593Smuzhiyun 		WARN(1, "invalid state %d", new_state);
1983*4882a593Smuzhiyun 		return -EINVAL;
1984*4882a593Smuzhiyun 	}
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
1987*4882a593Smuzhiyun 		sta->sta.addr, new_state);
1988*4882a593Smuzhiyun 
1989*4882a593Smuzhiyun 	/*
1990*4882a593Smuzhiyun 	 * notify the driver before the actual changes so it can
1991*4882a593Smuzhiyun 	 * fail the transition
1992*4882a593Smuzhiyun 	 */
1993*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_INSERTED)) {
1994*4882a593Smuzhiyun 		int err = drv_sta_state(sta->local, sta->sdata, sta,
1995*4882a593Smuzhiyun 					sta->sta_state, new_state);
1996*4882a593Smuzhiyun 		if (err)
1997*4882a593Smuzhiyun 			return err;
1998*4882a593Smuzhiyun 	}
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	/* reflect the change in all state variables */
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 	switch (new_state) {
2003*4882a593Smuzhiyun 	case IEEE80211_STA_NONE:
2004*4882a593Smuzhiyun 		if (sta->sta_state == IEEE80211_STA_AUTH)
2005*4882a593Smuzhiyun 			clear_bit(WLAN_STA_AUTH, &sta->_flags);
2006*4882a593Smuzhiyun 		break;
2007*4882a593Smuzhiyun 	case IEEE80211_STA_AUTH:
2008*4882a593Smuzhiyun 		if (sta->sta_state == IEEE80211_STA_NONE) {
2009*4882a593Smuzhiyun 			set_bit(WLAN_STA_AUTH, &sta->_flags);
2010*4882a593Smuzhiyun 		} else if (sta->sta_state == IEEE80211_STA_ASSOC) {
2011*4882a593Smuzhiyun 			clear_bit(WLAN_STA_ASSOC, &sta->_flags);
2012*4882a593Smuzhiyun 			ieee80211_recalc_min_chandef(sta->sdata);
2013*4882a593Smuzhiyun 			if (!sta->sta.support_p2p_ps)
2014*4882a593Smuzhiyun 				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
2015*4882a593Smuzhiyun 		}
2016*4882a593Smuzhiyun 		break;
2017*4882a593Smuzhiyun 	case IEEE80211_STA_ASSOC:
2018*4882a593Smuzhiyun 		if (sta->sta_state == IEEE80211_STA_AUTH) {
2019*4882a593Smuzhiyun 			set_bit(WLAN_STA_ASSOC, &sta->_flags);
2020*4882a593Smuzhiyun 			sta->assoc_at = ktime_get_boottime_ns();
2021*4882a593Smuzhiyun 			ieee80211_recalc_min_chandef(sta->sdata);
2022*4882a593Smuzhiyun 			if (!sta->sta.support_p2p_ps)
2023*4882a593Smuzhiyun 				ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
2024*4882a593Smuzhiyun 		} else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
2025*4882a593Smuzhiyun 			ieee80211_vif_dec_num_mcast(sta->sdata);
2026*4882a593Smuzhiyun 			clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
2027*4882a593Smuzhiyun 			ieee80211_clear_fast_xmit(sta);
2028*4882a593Smuzhiyun 			ieee80211_clear_fast_rx(sta);
2029*4882a593Smuzhiyun 		}
2030*4882a593Smuzhiyun 		break;
2031*4882a593Smuzhiyun 	case IEEE80211_STA_AUTHORIZED:
2032*4882a593Smuzhiyun 		if (sta->sta_state == IEEE80211_STA_ASSOC) {
2033*4882a593Smuzhiyun 			ieee80211_vif_inc_num_mcast(sta->sdata);
2034*4882a593Smuzhiyun 			set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
2035*4882a593Smuzhiyun 			ieee80211_check_fast_xmit(sta);
2036*4882a593Smuzhiyun 			ieee80211_check_fast_rx(sta);
2037*4882a593Smuzhiyun 		}
2038*4882a593Smuzhiyun 		if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
2039*4882a593Smuzhiyun 		    sta->sdata->vif.type == NL80211_IFTYPE_AP)
2040*4882a593Smuzhiyun 			cfg80211_send_layer2_update(sta->sdata->dev,
2041*4882a593Smuzhiyun 						    sta->sta.addr);
2042*4882a593Smuzhiyun 		break;
2043*4882a593Smuzhiyun 	default:
2044*4882a593Smuzhiyun 		break;
2045*4882a593Smuzhiyun 	}
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun 	sta->sta_state = new_state;
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun 	return 0;
2050*4882a593Smuzhiyun }
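/*
 * The transitions permitted above form a linear ladder that is always
 * walked one step at a time:
 *
 *   NONE <-> AUTH <-> ASSOC <-> AUTHORIZED
 *
 * A hedged caller sketch (the function name is illustrative) stepping a
 * freshly allocated entry, which starts in IEEE80211_STA_NONE, all the
 * way up:
 */
static int example_authorize_sta(struct sta_info *sta)
{
	int ret;

	ret = sta_info_move_state(sta, IEEE80211_STA_AUTH);
	if (ret)
		return ret;
	ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
	if (ret)
		return ret;
	return sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
}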
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun u8 sta_info_tx_streams(struct sta_info *sta)
2053*4882a593Smuzhiyun {
2054*4882a593Smuzhiyun 	struct ieee80211_sta_ht_cap *ht_cap = &sta->sta.ht_cap;
2055*4882a593Smuzhiyun 	u8 rx_streams;
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	if (!sta->sta.ht_cap.ht_supported)
2058*4882a593Smuzhiyun 		return 1;
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 	if (sta->sta.vht_cap.vht_supported) {
2061*4882a593Smuzhiyun 		int i;
2062*4882a593Smuzhiyun 		u16 tx_mcs_map =
2063*4882a593Smuzhiyun 			le16_to_cpu(sta->sta.vht_cap.vht_mcs.tx_mcs_map);
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 		for (i = 7; i >= 0; i--)
2066*4882a593Smuzhiyun 			if ((tx_mcs_map & (0x3 << (i * 2))) !=
2067*4882a593Smuzhiyun 			    IEEE80211_VHT_MCS_NOT_SUPPORTED)
2068*4882a593Smuzhiyun 				return i + 1;
2069*4882a593Smuzhiyun 	}
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun 	if (ht_cap->mcs.rx_mask[3])
2072*4882a593Smuzhiyun 		rx_streams = 4;
2073*4882a593Smuzhiyun 	else if (ht_cap->mcs.rx_mask[2])
2074*4882a593Smuzhiyun 		rx_streams = 3;
2075*4882a593Smuzhiyun 	else if (ht_cap->mcs.rx_mask[1])
2076*4882a593Smuzhiyun 		rx_streams = 2;
2077*4882a593Smuzhiyun 	else
2078*4882a593Smuzhiyun 		rx_streams = 1;
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun 	if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF))
2081*4882a593Smuzhiyun 		return rx_streams;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2084*4882a593Smuzhiyun 			>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
2085*4882a593Smuzhiyun }
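/*
 * Worked example for the VHT branch above: each spatial stream gets two
 * bits in tx_mcs_map and the value 3 (IEEE80211_VHT_MCS_NOT_SUPPORTED)
 * marks the stream as unused. A map of 0xfffa decodes as streams 1-2
 * supporting MCS 0-9 and streams 3-8 unsupported, so the loop returns 2.
 */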
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun static struct ieee80211_sta_rx_stats *
2088*4882a593Smuzhiyun sta_get_last_rx_stats(struct sta_info *sta)
2089*4882a593Smuzhiyun {
2090*4882a593Smuzhiyun 	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;
2091*4882a593Smuzhiyun 	int cpu;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	if (!sta->pcpu_rx_stats)
2094*4882a593Smuzhiyun 		return stats;
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	for_each_possible_cpu(cpu) {
2097*4882a593Smuzhiyun 		struct ieee80211_sta_rx_stats *cpustats;
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun 		cpustats = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 		if (time_after(cpustats->last_rx, stats->last_rx))
2102*4882a593Smuzhiyun 			stats = cpustats;
2103*4882a593Smuzhiyun 	}
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	return stats;
2106*4882a593Smuzhiyun }
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
2109*4882a593Smuzhiyun 				  struct rate_info *rinfo)
2110*4882a593Smuzhiyun {
2111*4882a593Smuzhiyun 	rinfo->bw = STA_STATS_GET(BW, rate);
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun 	switch (STA_STATS_GET(TYPE, rate)) {
2114*4882a593Smuzhiyun 	case STA_STATS_RATE_TYPE_VHT:
2115*4882a593Smuzhiyun 		rinfo->flags = RATE_INFO_FLAGS_VHT_MCS;
2116*4882a593Smuzhiyun 		rinfo->mcs = STA_STATS_GET(VHT_MCS, rate);
2117*4882a593Smuzhiyun 		rinfo->nss = STA_STATS_GET(VHT_NSS, rate);
2118*4882a593Smuzhiyun 		if (STA_STATS_GET(SGI, rate))
2119*4882a593Smuzhiyun 			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
2120*4882a593Smuzhiyun 		break;
2121*4882a593Smuzhiyun 	case STA_STATS_RATE_TYPE_HT:
2122*4882a593Smuzhiyun 		rinfo->flags = RATE_INFO_FLAGS_MCS;
2123*4882a593Smuzhiyun 		rinfo->mcs = STA_STATS_GET(HT_MCS, rate);
2124*4882a593Smuzhiyun 		if (STA_STATS_GET(SGI, rate))
2125*4882a593Smuzhiyun 			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
2126*4882a593Smuzhiyun 		break;
2127*4882a593Smuzhiyun 	case STA_STATS_RATE_TYPE_LEGACY: {
2128*4882a593Smuzhiyun 		struct ieee80211_supported_band *sband;
2129*4882a593Smuzhiyun 		u16 brate;
2130*4882a593Smuzhiyun 		unsigned int shift;
2131*4882a593Smuzhiyun 		int band = STA_STATS_GET(LEGACY_BAND, rate);
2132*4882a593Smuzhiyun 		int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun 		sband = local->hw.wiphy->bands[band];
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun 		if (WARN_ON_ONCE(!sband->bitrates))
2137*4882a593Smuzhiyun 			break;
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 		brate = sband->bitrates[rate_idx].bitrate;
2140*4882a593Smuzhiyun 		if (rinfo->bw == RATE_INFO_BW_5)
2141*4882a593Smuzhiyun 			shift = 2;
2142*4882a593Smuzhiyun 		else if (rinfo->bw == RATE_INFO_BW_10)
2143*4882a593Smuzhiyun 			shift = 1;
2144*4882a593Smuzhiyun 		else
2145*4882a593Smuzhiyun 			shift = 0;
2146*4882a593Smuzhiyun 		rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
2147*4882a593Smuzhiyun 		break;
2148*4882a593Smuzhiyun 		}
2149*4882a593Smuzhiyun 	case STA_STATS_RATE_TYPE_HE:
2150*4882a593Smuzhiyun 		rinfo->flags = RATE_INFO_FLAGS_HE_MCS;
2151*4882a593Smuzhiyun 		rinfo->mcs = STA_STATS_GET(HE_MCS, rate);
2152*4882a593Smuzhiyun 		rinfo->nss = STA_STATS_GET(HE_NSS, rate);
2153*4882a593Smuzhiyun 		rinfo->he_gi = STA_STATS_GET(HE_GI, rate);
2154*4882a593Smuzhiyun 		rinfo->he_ru_alloc = STA_STATS_GET(HE_RU, rate);
2155*4882a593Smuzhiyun 		rinfo->he_dcm = STA_STATS_GET(HE_DCM, rate);
2156*4882a593Smuzhiyun 		break;
2157*4882a593Smuzhiyun 	}
2158*4882a593Smuzhiyun }
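/*
 * Worked example for the legacy branch above: bitrates are stored in
 * units of 100 kbps, so a 12 Mbit/s entry has brate == 120. On a 5 MHz
 * channel (RATE_INFO_BW_5, shift == 2) the reported rate becomes
 * DIV_ROUND_UP(120, 1 << 2) == 30, i.e. 3.0 Mbit/s.
 */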
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
2161*4882a593Smuzhiyun {
2162*4882a593Smuzhiyun 	u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun 	if (rate == STA_STATS_RATE_INVALID)
2165*4882a593Smuzhiyun 		return -EINVAL;
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	sta_stats_decode_rate(sta->local, rate, rinfo);
2168*4882a593Smuzhiyun 	return 0;
2169*4882a593Smuzhiyun }
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
2172*4882a593Smuzhiyun 					int tid)
2173*4882a593Smuzhiyun {
2174*4882a593Smuzhiyun 	unsigned int start;
2175*4882a593Smuzhiyun 	u64 value;
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	do {
2178*4882a593Smuzhiyun 		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
2179*4882a593Smuzhiyun 		value = rxstats->msdu[tid];
2180*4882a593Smuzhiyun 	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun 	return value;
2183*4882a593Smuzhiyun }
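/*
 * The fetch/retry loop above pairs with a writer that brackets its updates
 * with the same u64_stats seqcount so 64-bit counters read consistently on
 * 32-bit machines. A sketch of such a writer (not part of this file, the
 * function name is illustrative):
 */
static inline void example_count_rx_msdu(struct ieee80211_sta_rx_stats *rxstats,
					 int tid)
{
	u64_stats_update_begin(&rxstats->syncp);
	rxstats->msdu[tid]++;
	u64_stats_update_end(&rxstats->syncp);
}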
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun static void sta_set_tidstats(struct sta_info *sta,
2186*4882a593Smuzhiyun 			     struct cfg80211_tid_stats *tidstats,
2187*4882a593Smuzhiyun 			     int tid)
2188*4882a593Smuzhiyun {
2189*4882a593Smuzhiyun 	struct ieee80211_local *local = sta->local;
2190*4882a593Smuzhiyun 	int cpu;
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
2193*4882a593Smuzhiyun 		tidstats->rx_msdu += sta_get_tidstats_msdu(&sta->rx_stats, tid);
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 		if (sta->pcpu_rx_stats) {
2196*4882a593Smuzhiyun 			for_each_possible_cpu(cpu) {
2197*4882a593Smuzhiyun 				struct ieee80211_sta_rx_stats *cpurxs;
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
2200*4882a593Smuzhiyun 				tidstats->rx_msdu +=
2201*4882a593Smuzhiyun 					sta_get_tidstats_msdu(cpurxs, tid);
2202*4882a593Smuzhiyun 			}
2203*4882a593Smuzhiyun 		}
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun 		tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
2206*4882a593Smuzhiyun 	}
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
2209*4882a593Smuzhiyun 		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
2210*4882a593Smuzhiyun 		tidstats->tx_msdu = sta->tx_stats.msdu[tid];
2211*4882a593Smuzhiyun 	}
2212*4882a593Smuzhiyun 
2213*4882a593Smuzhiyun 	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
2214*4882a593Smuzhiyun 	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
2215*4882a593Smuzhiyun 		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
2216*4882a593Smuzhiyun 		tidstats->tx_msdu_retries = sta->status_stats.msdu_retries[tid];
2217*4882a593Smuzhiyun 	}
2218*4882a593Smuzhiyun 
2219*4882a593Smuzhiyun 	if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
2220*4882a593Smuzhiyun 	    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
2221*4882a593Smuzhiyun 		tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
2222*4882a593Smuzhiyun 		tidstats->tx_msdu_failed = sta->status_stats.msdu_failed[tid];
2223*4882a593Smuzhiyun 	}
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 	if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) {
2226*4882a593Smuzhiyun 		spin_lock_bh(&local->fq.lock);
2227*4882a593Smuzhiyun 		rcu_read_lock();
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun 		tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS);
2230*4882a593Smuzhiyun 		ieee80211_fill_txq_stats(&tidstats->txq_stats,
2231*4882a593Smuzhiyun 					 to_txq_info(sta->sta.txq[tid]));
2232*4882a593Smuzhiyun 
2233*4882a593Smuzhiyun 		rcu_read_unlock();
2234*4882a593Smuzhiyun 		spin_unlock_bh(&local->fq.lock);
2235*4882a593Smuzhiyun 	}
2236*4882a593Smuzhiyun }
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
2239*4882a593Smuzhiyun {
2240*4882a593Smuzhiyun 	unsigned int start;
2241*4882a593Smuzhiyun 	u64 value;
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 	do {
2244*4882a593Smuzhiyun 		start = u64_stats_fetch_begin_irq(&rxstats->syncp);
2245*4882a593Smuzhiyun 		value = rxstats->bytes;
2246*4882a593Smuzhiyun 	} while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 	return value;
2249*4882a593Smuzhiyun }
2250*4882a593Smuzhiyun 
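/*
 * Fill the station_info for cfg80211: pre-seed the beacon count so
 * beacon-filtering drivers can adjust it, let the driver fill what it can
 * via drv_sta_statistics(), then fill the remaining fields (byte/packet
 * counters, airtime, signal data, rates, optional per-TID stats, mesh and
 * BSS parameters, station flags, expected throughput, ACK signal) from
 * mac80211's own accounting, skipping anything the driver already provided.
 */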
2251*4882a593Smuzhiyun void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
2252*4882a593Smuzhiyun 		   bool tidstats)
2253*4882a593Smuzhiyun {
2254*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
2255*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
2256*4882a593Smuzhiyun 	u32 thr = 0;
2257*4882a593Smuzhiyun 	int i, ac, cpu;
2258*4882a593Smuzhiyun 	struct ieee80211_sta_rx_stats *last_rxstats;
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 	last_rxstats = sta_get_last_rx_stats(sta);
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 	sinfo->generation = sdata->local->sta_generation;
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun 	/* do before driver, so beacon filtering drivers have a
2265*4882a593Smuzhiyun 	 * chance to e.g. just add the number of filtered beacons
2266*4882a593Smuzhiyun 	 * (or just modify the value entirely, of course)
2267*4882a593Smuzhiyun 	 */
2268*4882a593Smuzhiyun 	if (sdata->vif.type == NL80211_IFTYPE_STATION)
2269*4882a593Smuzhiyun 		sinfo->rx_beacon = sdata->u.mgd.count_beacon_signal;
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	drv_sta_statistics(local, sdata, &sta->sta, sinfo);
2272*4882a593Smuzhiyun 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME) |
2273*4882a593Smuzhiyun 			 BIT_ULL(NL80211_STA_INFO_STA_FLAGS) |
2274*4882a593Smuzhiyun 			 BIT_ULL(NL80211_STA_INFO_BSS_PARAM) |
2275*4882a593Smuzhiyun 			 BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME) |
2276*4882a593Smuzhiyun 			 BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME) |
2277*4882a593Smuzhiyun 			 BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC);
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
2280*4882a593Smuzhiyun 		sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
2281*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_LOSS);
2282*4882a593Smuzhiyun 	}
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
2285*4882a593Smuzhiyun 	sinfo->assoc_at = sta->assoc_at;
2286*4882a593Smuzhiyun 	sinfo->inactive_time =
2287*4882a593Smuzhiyun 		jiffies_to_msecs(jiffies - ieee80211_sta_last_active(sta));
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun 	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_TX_BYTES64) |
2290*4882a593Smuzhiyun 			       BIT_ULL(NL80211_STA_INFO_TX_BYTES)))) {
2291*4882a593Smuzhiyun 		sinfo->tx_bytes = 0;
2292*4882a593Smuzhiyun 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
2293*4882a593Smuzhiyun 			sinfo->tx_bytes += sta->tx_stats.bytes[ac];
2294*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
2295*4882a593Smuzhiyun 	}
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_PACKETS))) {
2298*4882a593Smuzhiyun 		sinfo->tx_packets = 0;
2299*4882a593Smuzhiyun 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
2300*4882a593Smuzhiyun 			sinfo->tx_packets += sta->tx_stats.packets[ac];
2301*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
2302*4882a593Smuzhiyun 	}
2303*4882a593Smuzhiyun 
2304*4882a593Smuzhiyun 	if (!(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES64) |
2305*4882a593Smuzhiyun 			       BIT_ULL(NL80211_STA_INFO_RX_BYTES)))) {
2306*4882a593Smuzhiyun 		sinfo->rx_bytes += sta_get_stats_bytes(&sta->rx_stats);
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun 		if (sta->pcpu_rx_stats) {
2309*4882a593Smuzhiyun 			for_each_possible_cpu(cpu) {
2310*4882a593Smuzhiyun 				struct ieee80211_sta_rx_stats *cpurxs;
2311*4882a593Smuzhiyun 
2312*4882a593Smuzhiyun 				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
2313*4882a593Smuzhiyun 				sinfo->rx_bytes += sta_get_stats_bytes(cpurxs);
2314*4882a593Smuzhiyun 			}
2315*4882a593Smuzhiyun 		}
2316*4882a593Smuzhiyun 
2317*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
2318*4882a593Smuzhiyun 	}
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_PACKETS))) {
2321*4882a593Smuzhiyun 		sinfo->rx_packets = sta->rx_stats.packets;
2322*4882a593Smuzhiyun 		if (sta->pcpu_rx_stats) {
2323*4882a593Smuzhiyun 			for_each_possible_cpu(cpu) {
2324*4882a593Smuzhiyun 				struct ieee80211_sta_rx_stats *cpurxs;
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun 				cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
2327*4882a593Smuzhiyun 				sinfo->rx_packets += cpurxs->packets;
2328*4882a593Smuzhiyun 			}
2329*4882a593Smuzhiyun 		}
2330*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
2331*4882a593Smuzhiyun 	}
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_RETRIES))) {
2334*4882a593Smuzhiyun 		sinfo->tx_retries = sta->status_stats.retry_count;
2335*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
2336*4882a593Smuzhiyun 	}
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))) {
2339*4882a593Smuzhiyun 		sinfo->tx_failed = sta->status_stats.retry_failed;
2340*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
2341*4882a593Smuzhiyun 	}
2342*4882a593Smuzhiyun 
2343*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_DURATION))) {
2344*4882a593Smuzhiyun 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
2345*4882a593Smuzhiyun 			sinfo->rx_duration += sta->airtime[ac].rx_airtime;
2346*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
2347*4882a593Smuzhiyun 	}
2348*4882a593Smuzhiyun 
2349*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_DURATION))) {
2350*4882a593Smuzhiyun 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
2351*4882a593Smuzhiyun 			sinfo->tx_duration += sta->airtime[ac].tx_airtime;
2352*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
2353*4882a593Smuzhiyun 	}
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) {
2356*4882a593Smuzhiyun 		sinfo->airtime_weight = sta->airtime_weight;
2357*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT);
2358*4882a593Smuzhiyun 	}
2359*4882a593Smuzhiyun 
2360*4882a593Smuzhiyun 	sinfo->rx_dropped_misc = sta->rx_stats.dropped;
2361*4882a593Smuzhiyun 	if (sta->pcpu_rx_stats) {
2362*4882a593Smuzhiyun 		for_each_possible_cpu(cpu) {
2363*4882a593Smuzhiyun 			struct ieee80211_sta_rx_stats *cpurxs;
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun 			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
2366*4882a593Smuzhiyun 			sinfo->rx_dropped_misc += cpurxs->dropped;
2367*4882a593Smuzhiyun 		}
2368*4882a593Smuzhiyun 	}
2369*4882a593Smuzhiyun 
2370*4882a593Smuzhiyun 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2371*4882a593Smuzhiyun 	    !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
2372*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX) |
2373*4882a593Smuzhiyun 				 BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
2374*4882a593Smuzhiyun 		sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
2375*4882a593Smuzhiyun 	}
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 	if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
2378*4882a593Smuzhiyun 	    ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
2379*4882a593Smuzhiyun 		if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL))) {
2380*4882a593Smuzhiyun 			sinfo->signal = (s8)last_rxstats->last_signal;
2381*4882a593Smuzhiyun 			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
2382*4882a593Smuzhiyun 		}
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun 		if (!sta->pcpu_rx_stats &&
2385*4882a593Smuzhiyun 		    !(sinfo->filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG))) {
2386*4882a593Smuzhiyun 			sinfo->signal_avg =
2387*4882a593Smuzhiyun 				-ewma_signal_read(&sta->rx_stats_avg.signal);
2388*4882a593Smuzhiyun 			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
2389*4882a593Smuzhiyun 		}
2390*4882a593Smuzhiyun 	}
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 	/* for the average - if pcpu_rx_stats isn't set - rxstats must point to
2393*4882a593Smuzhiyun 	 * the sta->rx_stats struct, so the check here is fine with and without
2394*4882a593Smuzhiyun 	 * pcpu statistics
2395*4882a593Smuzhiyun 	 */
2396*4882a593Smuzhiyun 	if (last_rxstats->chains &&
2397*4882a593Smuzhiyun 	    !(sinfo->filled & (BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL) |
2398*4882a593Smuzhiyun 			       BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
2399*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
2400*4882a593Smuzhiyun 		if (!sta->pcpu_rx_stats)
2401*4882a593Smuzhiyun 			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 		sinfo->chains = last_rxstats->chains;
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun 		for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
2406*4882a593Smuzhiyun 			sinfo->chain_signal[i] =
2407*4882a593Smuzhiyun 				last_rxstats->chain_signal_last[i];
2408*4882a593Smuzhiyun 			sinfo->chain_signal_avg[i] =
2409*4882a593Smuzhiyun 				-ewma_signal_read(&sta->rx_stats_avg.chain_signal[i]);
2410*4882a593Smuzhiyun 		}
2411*4882a593Smuzhiyun 	}
2412*4882a593Smuzhiyun 
2413*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
2414*4882a593Smuzhiyun 		sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
2415*4882a593Smuzhiyun 				     &sinfo->txrate);
2416*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
2417*4882a593Smuzhiyun 	}
2418*4882a593Smuzhiyun 
2419*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE))) {
2420*4882a593Smuzhiyun 		if (sta_set_rate_info_rx(sta, &sinfo->rxrate) == 0)
2421*4882a593Smuzhiyun 			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
2422*4882a593Smuzhiyun 	}
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun 	if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
2425*4882a593Smuzhiyun 		for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
2426*4882a593Smuzhiyun 			sta_set_tidstats(sta, &sinfo->pertid[i], i);
2427*4882a593Smuzhiyun 	}
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
2430*4882a593Smuzhiyun #ifdef CONFIG_MAC80211_MESH
2431*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) |
2432*4882a593Smuzhiyun 				 BIT_ULL(NL80211_STA_INFO_PLID) |
2433*4882a593Smuzhiyun 				 BIT_ULL(NL80211_STA_INFO_PLINK_STATE) |
2434*4882a593Smuzhiyun 				 BIT_ULL(NL80211_STA_INFO_LOCAL_PM) |
2435*4882a593Smuzhiyun 				 BIT_ULL(NL80211_STA_INFO_PEER_PM) |
2436*4882a593Smuzhiyun 				 BIT_ULL(NL80211_STA_INFO_NONPEER_PM) |
2437*4882a593Smuzhiyun 				 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) |
2438*4882a593Smuzhiyun 				 BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS);
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 		sinfo->llid = sta->mesh->llid;
2441*4882a593Smuzhiyun 		sinfo->plid = sta->mesh->plid;
2442*4882a593Smuzhiyun 		sinfo->plink_state = sta->mesh->plink_state;
2443*4882a593Smuzhiyun 		if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
2444*4882a593Smuzhiyun 			sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET);
2445*4882a593Smuzhiyun 			sinfo->t_offset = sta->mesh->t_offset;
2446*4882a593Smuzhiyun 		}
2447*4882a593Smuzhiyun 		sinfo->local_pm = sta->mesh->local_pm;
2448*4882a593Smuzhiyun 		sinfo->peer_pm = sta->mesh->peer_pm;
2449*4882a593Smuzhiyun 		sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
2450*4882a593Smuzhiyun 		sinfo->connected_to_gate = sta->mesh->connected_to_gate;
2451*4882a593Smuzhiyun 		sinfo->connected_to_as = sta->mesh->connected_to_as;
2452*4882a593Smuzhiyun #endif
2453*4882a593Smuzhiyun 	}
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun 	sinfo->bss_param.flags = 0;
2456*4882a593Smuzhiyun 	if (sdata->vif.bss_conf.use_cts_prot)
2457*4882a593Smuzhiyun 		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
2458*4882a593Smuzhiyun 	if (sdata->vif.bss_conf.use_short_preamble)
2459*4882a593Smuzhiyun 		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
2460*4882a593Smuzhiyun 	if (sdata->vif.bss_conf.use_short_slot)
2461*4882a593Smuzhiyun 		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
2462*4882a593Smuzhiyun 	sinfo->bss_param.dtim_period = sdata->vif.bss_conf.dtim_period;
2463*4882a593Smuzhiyun 	sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
2464*4882a593Smuzhiyun 
2465*4882a593Smuzhiyun 	sinfo->sta_flags.set = 0;
2466*4882a593Smuzhiyun 	sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
2467*4882a593Smuzhiyun 				BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
2468*4882a593Smuzhiyun 				BIT(NL80211_STA_FLAG_WME) |
2469*4882a593Smuzhiyun 				BIT(NL80211_STA_FLAG_MFP) |
2470*4882a593Smuzhiyun 				BIT(NL80211_STA_FLAG_AUTHENTICATED) |
2471*4882a593Smuzhiyun 				BIT(NL80211_STA_FLAG_ASSOCIATED) |
2472*4882a593Smuzhiyun 				BIT(NL80211_STA_FLAG_TDLS_PEER);
2473*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
2474*4882a593Smuzhiyun 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
2475*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
2476*4882a593Smuzhiyun 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
2477*4882a593Smuzhiyun 	if (sta->sta.wme)
2478*4882a593Smuzhiyun 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
2479*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_MFP))
2480*4882a593Smuzhiyun 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
2481*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_AUTH))
2482*4882a593Smuzhiyun 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
2483*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_ASSOC))
2484*4882a593Smuzhiyun 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
2485*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
2486*4882a593Smuzhiyun 		sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun 	thr = sta_get_expected_throughput(sta);
2489*4882a593Smuzhiyun 
2490*4882a593Smuzhiyun 	if (thr != 0) {
2491*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_EXPECTED_THROUGHPUT);
2492*4882a593Smuzhiyun 		sinfo->expected_throughput = thr;
2493*4882a593Smuzhiyun 	}
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL)) &&
2496*4882a593Smuzhiyun 	    sta->status_stats.ack_signal_filled) {
2497*4882a593Smuzhiyun 		sinfo->ack_signal = sta->status_stats.last_ack_signal;
2498*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
2499*4882a593Smuzhiyun 	}
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun 	if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG)) &&
2502*4882a593Smuzhiyun 	    sta->status_stats.ack_signal_filled) {
2503*4882a593Smuzhiyun 		sinfo->avg_ack_signal =
2504*4882a593Smuzhiyun 			-(s8)ewma_avg_signal_read(
2505*4882a593Smuzhiyun 				&sta->status_stats.avg_ack_signal);
2506*4882a593Smuzhiyun 		sinfo->filled |=
2507*4882a593Smuzhiyun 			BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
2508*4882a593Smuzhiyun 	}
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 	if (ieee80211_vif_is_mesh(&sdata->vif)) {
2511*4882a593Smuzhiyun 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC);
2512*4882a593Smuzhiyun 		sinfo->airtime_link_metric =
2513*4882a593Smuzhiyun 			airtime_link_metric_get(local, sta);
2514*4882a593Smuzhiyun 	}
2515*4882a593Smuzhiyun }
2516*4882a593Smuzhiyun 
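/*
 * Expected throughput comes from the software rate control algorithm when it
 * implements get_expected_throughput(), otherwise from the driver.
 */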
2517*4882a593Smuzhiyun u32 sta_get_expected_throughput(struct sta_info *sta)
2518*4882a593Smuzhiyun {
2519*4882a593Smuzhiyun 	struct ieee80211_sub_if_data *sdata = sta->sdata;
2520*4882a593Smuzhiyun 	struct ieee80211_local *local = sdata->local;
2521*4882a593Smuzhiyun 	struct rate_control_ref *ref = NULL;
2522*4882a593Smuzhiyun 	u32 thr = 0;
2523*4882a593Smuzhiyun 
2524*4882a593Smuzhiyun 	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
2525*4882a593Smuzhiyun 		ref = local->rate_ctrl;
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	/* check if the driver has a SW RC implementation */
2528*4882a593Smuzhiyun 	if (ref && ref->ops->get_expected_throughput)
2529*4882a593Smuzhiyun 		thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
2530*4882a593Smuzhiyun 	else
2531*4882a593Smuzhiyun 		thr = drv_get_expected_throughput(local, sta);
2532*4882a593Smuzhiyun 
2533*4882a593Smuzhiyun 	return thr;
2534*4882a593Smuzhiyun }
2535*4882a593Smuzhiyun 
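/*
 * Last activity is the more recent of the last RX time and the last ACK
 * time, in jiffies.
 */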
2536*4882a593Smuzhiyun unsigned long ieee80211_sta_last_active(struct sta_info *sta)
2537*4882a593Smuzhiyun {
2538*4882a593Smuzhiyun 	struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 	if (!sta->status_stats.last_ack ||
2541*4882a593Smuzhiyun 	    time_after(stats->last_rx, sta->status_stats.last_ack))
2542*4882a593Smuzhiyun 		return stats->last_rx;
2543*4882a593Smuzhiyun 	return sta->status_stats.last_ack;
2544*4882a593Smuzhiyun }
2545*4882a593Smuzhiyun 
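/*
 * For wake_tx_queue drivers, relax the per-station CoDel parameters (larger
 * target and interval, no ECN) for slow stations, i.e. when the expected
 * throughput is below STA_SLOW_THRESHOLD scaled by the station count;
 * otherwise use the tighter defaults.
 */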
2546*4882a593Smuzhiyun static void sta_update_codel_params(struct sta_info *sta, u32 thr)
2547*4882a593Smuzhiyun {
2548*4882a593Smuzhiyun 	if (!sta->sdata->local->ops->wake_tx_queue)
2549*4882a593Smuzhiyun 		return;
2550*4882a593Smuzhiyun 
2551*4882a593Smuzhiyun 	if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) {
2552*4882a593Smuzhiyun 		sta->cparams.target = MS2TIME(50);
2553*4882a593Smuzhiyun 		sta->cparams.interval = MS2TIME(300);
2554*4882a593Smuzhiyun 		sta->cparams.ecn = false;
2555*4882a593Smuzhiyun 	} else {
2556*4882a593Smuzhiyun 		sta->cparams.target = MS2TIME(20);
2557*4882a593Smuzhiyun 		sta->cparams.interval = MS2TIME(100);
2558*4882a593Smuzhiyun 		sta->cparams.ecn = true;
2559*4882a593Smuzhiyun 	}
2560*4882a593Smuzhiyun }
2561*4882a593Smuzhiyun 
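/*
 * Driver-facing helper: update the station's CoDel parameters from a new
 * expected-throughput estimate.
 */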
2562*4882a593Smuzhiyun void ieee80211_sta_set_expected_throughput(struct ieee80211_sta *pubsta,
2563*4882a593Smuzhiyun 					   u32 thr)
2564*4882a593Smuzhiyun {
2565*4882a593Smuzhiyun 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun 	sta_update_codel_params(sta, thr);
2568*4882a593Smuzhiyun }
2569