/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

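	/*
	 * Channel-width (FAT) flags accumulate: the deliberate fall-throughs
	 * below mean e.g. a 160 MHz capable station also gets the 80 MHz,
	 * 40 MHz and (if HT capable) 20 MHz bits set.
	 */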
	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

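	/* The FW only distinguishes SISO/MIMO2/MIMO3: three or more RX
	 * spatial streams all map to MIMO3.
	 */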
	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	/* D6.0 10.12.2 A-MPDU length limit rules
	 * A STA indicates the maximum length of the A-MPDU pre-EOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum A-MPDU Length Exponent Extension field in its HE
	 * Capabilities element
	 */
	if (sta->he_cap.has_he)
		agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	/* Limit to max A-MPDU supported by FW */
	if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
		agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
			    STA_FLG_MAX_AGG_SIZE_SHIFT);
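	/*
	 * For example, a VHT exponent of 7 plus an HE extension of 3 would
	 * request a 2^(13 + 10) - 1 byte (~8 MB) A-MPDU; the clamp above
	 * reduces that to the 4 MB maximum the firmware supports.
	 */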

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
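		/*
		 * The low nibble of uapsd_acs holds the trigger-enabled ACs
		 * and the high nibble the delivery-enabled ACs; U-APSD uses
		 * the same set for both, hence the copy into the upper bits.
		 */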
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
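		/* max_sp is in units of 2 frames; 0 means "all frames" (128) */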
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

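	/*
	 * Only consider the session expired once twice the negotiated
	 * timeout has passed without RX, to stay on the safe side.
	 */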
	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       u16 *queueptr, u8 tid, u8 flags)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return 0;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
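	/* The queue is shared if more than one TID is currently mapped to it */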
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
				mvm->trans->cfg->min_256_ba_txq_size);

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	}

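	/*
	 * Try to allocate the queue, halving the requested size on each
	 * failure (down to a minimum of 16 entries), e.g. in case the
	 * transport cannot get DMA memory for a large ring.
	 */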
	do {
		__le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);

		queue = iwl_trans_txq_alloc(mvm->trans, enable,
					    sta_id, tid, SCD_QUEUE_CFG,
					    size, timeout);

		if (queue < 0)
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
					    size, sta_id, tid, queue);
		size /= 2;
	} while (queue < 0 && size >= 16);

	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
iwl_mvm_remove_inactive_tids(struct iwl_mvm * mvm,struct iwl_mvm_sta * mvmsta,int queue,unsigned long tid_bitmap,unsigned long * unshare_queues,unsigned long * changetid_queues)1032*4882a593Smuzhiyun static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1033*4882a593Smuzhiyun struct iwl_mvm_sta *mvmsta, int queue,
1034*4882a593Smuzhiyun unsigned long tid_bitmap,
1035*4882a593Smuzhiyun unsigned long *unshare_queues,
1036*4882a593Smuzhiyun unsigned long *changetid_queues)
1037*4882a593Smuzhiyun {
1038*4882a593Smuzhiyun int tid;
1039*4882a593Smuzhiyun
1040*4882a593Smuzhiyun lockdep_assert_held(&mvmsta->lock);
1041*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
1042*4882a593Smuzhiyun
1043*4882a593Smuzhiyun if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - report that it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* intentionally shadows the loop bitmap: a narrow (u16) view
		 * of the queue's bitmap after this TID has been removed */
		u16 tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}
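
/*
 * Editor's illustration (compiled out, not driver code): the ownership
 * invariant the loop above maintains - a queue's owner TID (txq_tid)
 * must remain within the queue's tid_bitmap; when the owner was just
 * removed, the queue lands in @changetid_queues for a later firmware
 * update. The helper name below is hypothetical.
 */
#if 0
static bool iwl_mvm_example_queue_needs_new_owner(struct iwl_mvm *mvm,
						  int queue)
{
	/* true when the current owner TID is no longer mapped here */
	return !(mvm->queue_info[queue].tid_bitmap &
		 BIT(mvm->queue_info[queue].txq_tid));
}
#endif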

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}
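
/*
 * Editor's illustration (compiled out, not driver code): the two call
 * patterns this file uses for iwl_mvm_inactivity_check() - a pure
 * clean-up pass with IWL_MVM_INVALID_STA, and a "try harder" pass that
 * may reclaim an inactive queue for a specific station (cf.
 * iwl_mvm_sta_alloc_queue() and iwl_mvm_reserve_sta_stream() below).
 * The wrapper name is hypothetical.
 */
#if 0
static int iwl_mvm_example_reclaim_queue(struct iwl_mvm *mvm, u8 sta_id)
{
	/* clean-up only: unshare/re-own queues, never frees one */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* allocation path: may free an inactive queue for @sta_id */
	return iwl_mvm_inactivity_check(mvm, sta_id); /* queue or -ENOSPC */
}
#endif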

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);

	return ret;
}
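
/*
 * Editor's illustration (compiled out, not driver code): the DQA
 * aggregatable-queue predicate used above and again in
 * iwl_mvm_realloc_queues_after_restart() - any DATA queue, plus the
 * BSS client queue, may carry A-MPDUs. The helper name is
 * hypothetical.
 */
#if 0
static bool iwl_mvm_example_queue_aggregatable(int queue)
{
	return queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
	       queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE;
}
#endif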

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here, but if this fails we can't
		 * transmit anyway - so just don't transmit the frame etc.
		 * and let them back up ... we've tried our best to allocate
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			list_del_init(&mvmtxq->list);
			continue;
		}

		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}
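
/*
 * Editor's illustration (compiled out, not driver code): the TID
 * mapping the worker above applies - mac80211's "no TID" management
 * TXQ (IEEE80211_NUM_TIDS) rides on the driver's IWL_MAX_TID_COUNT
 * slot. The helper name is hypothetical.
 */
#if 0
static u8 iwl_mvm_example_txq_tid(struct ieee80211_txq *txq)
{
	return txq->tid == IEEE80211_NUM_TIDS ? IWL_MAX_TID_COUNT :
						txq->tid;
}
#endif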

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and
			 * HW sets it now, a FW reset causes the seq num to
			 * restart at 0, so the driver needs to reset its
			 * internal copy as well to stay in sync with the real
			 * value
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
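
/*
 * Editor's illustration (compiled out, not driver code): seq_number is
 * kept in IEEE 802.11 sequence-control format, where the low 4 bits
 * are the fragment number - hence the "+= 0x10" per-MSDU step in
 * iwl_mvm_sta_alloc_queue() and the IEEE80211_SEQ_TO_SN() conversion
 * used above before handing an SSN to the hardware. The helper name is
 * hypothetical.
 */
#if 0
static u16 iwl_mvm_example_next_ssn(u16 seq_number)
{
	/* advance by one MSDU and extract the 12-bit sequence number */
	return IEEE80211_SEQ_TO_SN(seq_number + 0x10);
}
#endif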

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
				  0) >= 12 &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->trans_cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
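
/*
 * Editor's illustration (compiled out, not driver code): the
 * drain -> flush -> wait-empty -> undrain ordering that
 * iwl_mvm_rm_sta() below builds around iwl_mvm_drain_sta() when
 * tearing down a pre-22000 (non-TVQM) station. The function name is
 * hypothetical; every call it makes exists in this driver.
 */
#if 0
static int iwl_mvm_example_drain_cycle(struct iwl_mvm *mvm,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);	/* stop new TX */
	if (ret)
		return ret;

	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);	/* flush queued TX */
	if (ret)
		return ret;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
					     mvm_sta->tfd_queue_msk);
	if (ret)
		return ret;

	return iwl_mvm_drain_sta(mvm, mvm_sta, false);	/* undrain */
}
#endif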

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
				    0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		list_del_init(&mvmtxq->list);
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
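
/*
 * Editor's illustration (compiled out, not driver code): the internal
 * station lifecycle as used for the AUX station - allocate the local
 * entry, add it (with its queue) to the firmware via the helper defined
 * below, and deallocate on failure or on removal (cf.
 * iwl_mvm_add_aux_sta() and iwl_mvm_rm_aux_sta()). The function name
 * and the zero mac_id/color arguments are illustrative only.
 */
#if 0
static int iwl_mvm_example_int_sta_lifecycle(struct iwl_mvm *mvm)
{
	int ret;

	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta,
				       BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	ret = iwl_mvm_add_int_sta_with_queue(mvm, 0, 0, NULL, &mvm->aux_sta,
					     &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	return ret;
}
#endif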
1991*4882a593Smuzhiyun
iwl_mvm_dealloc_int_sta(struct iwl_mvm * mvm,struct iwl_mvm_int_sta * sta)1992*4882a593Smuzhiyun void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1993*4882a593Smuzhiyun {
1994*4882a593Smuzhiyun RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1995*4882a593Smuzhiyun memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1996*4882a593Smuzhiyun sta->sta_id = IWL_MVM_INVALID_STA;
1997*4882a593Smuzhiyun }
1998*4882a593Smuzhiyun
iwl_mvm_enable_aux_snif_queue(struct iwl_mvm * mvm,u16 queue,u8 sta_id,u8 fifo)1999*4882a593Smuzhiyun static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
2000*4882a593Smuzhiyun u8 sta_id, u8 fifo)
2001*4882a593Smuzhiyun {
2002*4882a593Smuzhiyun unsigned int wdg_timeout =
2003*4882a593Smuzhiyun mvm->trans->trans_cfg->base_params->wd_timeout;
2004*4882a593Smuzhiyun struct iwl_trans_txq_scd_cfg cfg = {
2005*4882a593Smuzhiyun .fifo = fifo,
2006*4882a593Smuzhiyun .sta_id = sta_id,
2007*4882a593Smuzhiyun .tid = IWL_MAX_TID_COUNT,
2008*4882a593Smuzhiyun .aggregate = false,
2009*4882a593Smuzhiyun .frame_limit = IWL_FRAME_LIMIT,
2010*4882a593Smuzhiyun };
2011*4882a593Smuzhiyun
2012*4882a593Smuzhiyun WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2015*4882a593Smuzhiyun }
2016*4882a593Smuzhiyun
iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm * mvm,u8 sta_id)2017*4882a593Smuzhiyun static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
2018*4882a593Smuzhiyun {
2019*4882a593Smuzhiyun unsigned int wdg_timeout =
2020*4882a593Smuzhiyun mvm->trans->trans_cfg->base_params->wd_timeout;
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2023*4882a593Smuzhiyun
2024*4882a593Smuzhiyun return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
2025*4882a593Smuzhiyun wdg_timeout);
2026*4882a593Smuzhiyun }
2027*4882a593Smuzhiyun
iwl_mvm_add_int_sta_with_queue(struct iwl_mvm * mvm,int macidx,int maccolor,u8 * addr,struct iwl_mvm_int_sta * sta,u16 * queue,int fifo)2028*4882a593Smuzhiyun static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2029*4882a593Smuzhiyun int maccolor, u8 *addr,
2030*4882a593Smuzhiyun struct iwl_mvm_int_sta *sta,
2031*4882a593Smuzhiyun u16 *queue, int fifo)
2032*4882a593Smuzhiyun {
2033*4882a593Smuzhiyun int ret;
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun /* Map queue to fifo - needs to happen before adding station */
2036*4882a593Smuzhiyun if (!iwl_mvm_has_new_tx_api(mvm))
2037*4882a593Smuzhiyun iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2038*4882a593Smuzhiyun
2039*4882a593Smuzhiyun ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2040*4882a593Smuzhiyun if (ret) {
2041*4882a593Smuzhiyun if (!iwl_mvm_has_new_tx_api(mvm))
2042*4882a593Smuzhiyun iwl_mvm_disable_txq(mvm, NULL, queue,
2043*4882a593Smuzhiyun IWL_MAX_TID_COUNT, 0);
2044*4882a593Smuzhiyun return ret;
2045*4882a593Smuzhiyun }
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun /*
2048*4882a593Smuzhiyun * For 22000 firmware and on we cannot add queue to a station unknown
2049*4882a593Smuzhiyun * to firmware so enable queue here - after the station was added
2050*4882a593Smuzhiyun */
2051*4882a593Smuzhiyun if (iwl_mvm_has_new_tx_api(mvm)) {
2052*4882a593Smuzhiyun int txq;
2053*4882a593Smuzhiyun
2054*4882a593Smuzhiyun txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2055*4882a593Smuzhiyun if (txq < 0) {
2056*4882a593Smuzhiyun iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2057*4882a593Smuzhiyun return txq;
2058*4882a593Smuzhiyun }
2059*4882a593Smuzhiyun
2060*4882a593Smuzhiyun *queue = txq;
2061*4882a593Smuzhiyun }
2062*4882a593Smuzhiyun
2063*4882a593Smuzhiyun return 0;
2064*4882a593Smuzhiyun }
2065*4882a593Smuzhiyun
iwl_mvm_add_aux_sta(struct iwl_mvm * mvm,u32 lmac_id)2066*4882a593Smuzhiyun int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
2067*4882a593Smuzhiyun {
2068*4882a593Smuzhiyun int ret;
2069*4882a593Smuzhiyun
2070*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2071*4882a593Smuzhiyun
2072*4882a593Smuzhiyun /* Allocate aux station and assign to it the aux queue */
2073*4882a593Smuzhiyun ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2074*4882a593Smuzhiyun NL80211_IFTYPE_UNSPECIFIED,
2075*4882a593Smuzhiyun IWL_STA_AUX_ACTIVITY);
2076*4882a593Smuzhiyun if (ret)
2077*4882a593Smuzhiyun return ret;
2078*4882a593Smuzhiyun
2079*4882a593Smuzhiyun /*
2080*4882a593Smuzhiyun * In CDB NICs we need to specify which lmac to use for aux activity
2081*4882a593Smuzhiyun * using the mac_id argument place to send lmac_id to the function
2082*4882a593Smuzhiyun */
2083*4882a593Smuzhiyun ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2084*4882a593Smuzhiyun &mvm->aux_sta, &mvm->aux_queue,
2085*4882a593Smuzhiyun IWL_MVM_TX_FIFO_MCAST);
2086*4882a593Smuzhiyun if (ret) {
2087*4882a593Smuzhiyun iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2088*4882a593Smuzhiyun return ret;
2089*4882a593Smuzhiyun }
2090*4882a593Smuzhiyun
2091*4882a593Smuzhiyun return 0;
2092*4882a593Smuzhiyun }
2093*4882a593Smuzhiyun
iwl_mvm_add_snif_sta(struct iwl_mvm * mvm,struct ieee80211_vif * vif)2094*4882a593Smuzhiyun int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2095*4882a593Smuzhiyun {
2096*4882a593Smuzhiyun struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2097*4882a593Smuzhiyun
2098*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2099*4882a593Smuzhiyun
2100*4882a593Smuzhiyun return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2101*4882a593Smuzhiyun NULL, &mvm->snif_sta,
2102*4882a593Smuzhiyun &mvm->snif_queue,
2103*4882a593Smuzhiyun IWL_MVM_TX_FIFO_BE);
2104*4882a593Smuzhiyun }
2105*4882a593Smuzhiyun
iwl_mvm_rm_snif_sta(struct iwl_mvm * mvm,struct ieee80211_vif * vif)2106*4882a593Smuzhiyun int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2107*4882a593Smuzhiyun {
2108*4882a593Smuzhiyun int ret;
2109*4882a593Smuzhiyun
2110*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2111*4882a593Smuzhiyun
2112*4882a593Smuzhiyun if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
2113*4882a593Smuzhiyun return -EINVAL;
2114*4882a593Smuzhiyun
2115*4882a593Smuzhiyun iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
2116*4882a593Smuzhiyun ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2117*4882a593Smuzhiyun if (ret)
2118*4882a593Smuzhiyun IWL_WARN(mvm, "Failed sending remove station\n");
2119*4882a593Smuzhiyun
2120*4882a593Smuzhiyun return ret;
2121*4882a593Smuzhiyun }
2122*4882a593Smuzhiyun
iwl_mvm_rm_aux_sta(struct iwl_mvm * mvm)2123*4882a593Smuzhiyun int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2124*4882a593Smuzhiyun {
2125*4882a593Smuzhiyun int ret;
2126*4882a593Smuzhiyun
2127*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2128*4882a593Smuzhiyun
2129*4882a593Smuzhiyun if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
2130*4882a593Smuzhiyun return -EINVAL;
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
2133*4882a593Smuzhiyun ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2134*4882a593Smuzhiyun if (ret)
2135*4882a593Smuzhiyun IWL_WARN(mvm, "Failed sending remove station\n");
2136*4882a593Smuzhiyun iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2137*4882a593Smuzhiyun
2138*4882a593Smuzhiyun return ret;
2139*4882a593Smuzhiyun }
2140*4882a593Smuzhiyun
iwl_mvm_dealloc_snif_sta(struct iwl_mvm * mvm)2141*4882a593Smuzhiyun void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2142*4882a593Smuzhiyun {
2143*4882a593Smuzhiyun iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2144*4882a593Smuzhiyun }
2145*4882a593Smuzhiyun
2146*4882a593Smuzhiyun /*
2147*4882a593Smuzhiyun * Send the add station command for the vif's broadcast station.
2148*4882a593Smuzhiyun * Assumes that the station was already allocated.
2149*4882a593Smuzhiyun *
2150*4882a593Smuzhiyun * @mvm: the mvm component
2151*4882a593Smuzhiyun * @vif: the interface to which the broadcast station is added
2152*4882a593Smuzhiyun * @bsta: the broadcast station to add.
2153*4882a593Smuzhiyun */
iwl_mvm_send_add_bcast_sta(struct iwl_mvm * mvm,struct ieee80211_vif * vif)2154*4882a593Smuzhiyun int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2155*4882a593Smuzhiyun {
2156*4882a593Smuzhiyun struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2157*4882a593Smuzhiyun struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2158*4882a593Smuzhiyun static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2159*4882a593Smuzhiyun const u8 *baddr = _baddr;
2160*4882a593Smuzhiyun int queue;
2161*4882a593Smuzhiyun int ret;
2162*4882a593Smuzhiyun unsigned int wdg_timeout =
2163*4882a593Smuzhiyun iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2164*4882a593Smuzhiyun struct iwl_trans_txq_scd_cfg cfg = {
2165*4882a593Smuzhiyun .fifo = IWL_MVM_TX_FIFO_VO,
2166*4882a593Smuzhiyun .sta_id = mvmvif->bcast_sta.sta_id,
2167*4882a593Smuzhiyun .tid = IWL_MAX_TID_COUNT,
2168*4882a593Smuzhiyun .aggregate = false,
2169*4882a593Smuzhiyun .frame_limit = IWL_FRAME_LIMIT,
2170*4882a593Smuzhiyun };
2171*4882a593Smuzhiyun
2172*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2173*4882a593Smuzhiyun
2174*4882a593Smuzhiyun if (!iwl_mvm_has_new_tx_api(mvm)) {
2175*4882a593Smuzhiyun if (vif->type == NL80211_IFTYPE_AP ||
2176*4882a593Smuzhiyun vif->type == NL80211_IFTYPE_ADHOC) {
2177*4882a593Smuzhiyun queue = mvm->probe_queue;
2178*4882a593Smuzhiyun } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2179*4882a593Smuzhiyun queue = mvm->p2p_dev_queue;
2180*4882a593Smuzhiyun } else {
2181*4882a593Smuzhiyun WARN(1, "Missing required TXQ for adding bcast STA\n");
2182*4882a593Smuzhiyun return -EINVAL;
2183*4882a593Smuzhiyun }
2184*4882a593Smuzhiyun
2185*4882a593Smuzhiyun bsta->tfd_queue_msk |= BIT(queue);
2186*4882a593Smuzhiyun
2187*4882a593Smuzhiyun iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2188*4882a593Smuzhiyun }
2189*4882a593Smuzhiyun
2190*4882a593Smuzhiyun if (vif->type == NL80211_IFTYPE_ADHOC)
2191*4882a593Smuzhiyun baddr = vif->bss_conf.bssid;
2192*4882a593Smuzhiyun
2193*4882a593Smuzhiyun if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2194*4882a593Smuzhiyun return -ENOSPC;
2195*4882a593Smuzhiyun
2196*4882a593Smuzhiyun ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2197*4882a593Smuzhiyun mvmvif->id, mvmvif->color);
2198*4882a593Smuzhiyun if (ret)
2199*4882a593Smuzhiyun return ret;
2200*4882a593Smuzhiyun
2201*4882a593Smuzhiyun /*
2202*4882a593Smuzhiyun * For 22000 firmware and on we cannot add queue to a station unknown
2203*4882a593Smuzhiyun * to firmware so enable queue here - after the station was added
2204*4882a593Smuzhiyun */
2205*4882a593Smuzhiyun if (iwl_mvm_has_new_tx_api(mvm)) {
2206*4882a593Smuzhiyun queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2207*4882a593Smuzhiyun IWL_MAX_TID_COUNT,
2208*4882a593Smuzhiyun wdg_timeout);
2209*4882a593Smuzhiyun if (queue < 0) {
2210*4882a593Smuzhiyun iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2211*4882a593Smuzhiyun return queue;
2212*4882a593Smuzhiyun }
2213*4882a593Smuzhiyun
2214*4882a593Smuzhiyun if (vif->type == NL80211_IFTYPE_AP ||
2215*4882a593Smuzhiyun vif->type == NL80211_IFTYPE_ADHOC)
2216*4882a593Smuzhiyun mvm->probe_queue = queue;
2217*4882a593Smuzhiyun else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2218*4882a593Smuzhiyun mvm->p2p_dev_queue = queue;
2219*4882a593Smuzhiyun }
2220*4882a593Smuzhiyun
2221*4882a593Smuzhiyun return 0;
2222*4882a593Smuzhiyun }
2223*4882a593Smuzhiyun
static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 *queueptr, queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queueptr = &mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queueptr = &mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	queue = *queueptr;
	iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
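	/*
	 * Note: 03:00:00:00:00:00 has both the multicast and the
	 * locally-administered address bits set, so it can never collide
	 * with a real peer address; it only serves as a placeholder
	 * address for the internal multicast station.
	 */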
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = vif->type == NL80211_IFTYPE_AP ?
			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from the TFD
	 * queue mask, now it is needed like any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		goto err;

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
						    0,
						    timeout);
		if (queue < 0) {
			ret = queue;
			goto err;
		}
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);

	return 0;
err:
	iwl_mvm_dealloc_int_sta(mvm, msta);
	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
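	/*
	 * STA_KEY_NOT_VALID is what actually requests the removal: the
	 * command sent below is the same ADD_STA_KEY used for installing
	 * a key, just with the key entry flagged as no longer valid.
	 */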

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);

	iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

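/*
 * Tell every RX queue that this BAID is going away. The .sync = 1 below
 * asks iwl_mvm_sync_rx_queues_internal() to wait until all queues have
 * processed the notification, so no queue should still be touching the
 * reorder buffer for this BAID once this returns.
 */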
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_rss_sync_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames
		 * in the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far-fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
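		/*
		 * Worked example (entry size is illustrative, the real one
		 * depends on the build configuration): with 64-byte cache
		 * lines and 16-byte entries, buf_size = 10 gives 160 bytes,
		 * aligned up to 192, i.e. entries_per_queue = 12 below.
		 */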

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

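/*
 * TID to AC mapping, following the standard IEEE 802.11 user-priority to
 * access-category mapping (802.1D): TIDs 1/2 are background, TIDs 0/3 are
 * best effort, TIDs 4/5 are video and TIDs 6/7 are voice.
 */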
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/*
	 * Note the possible cases:
	 * 1. An enabled TXQ - TXQ needs to become agg'ed
	 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *    it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
			tid, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;
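	/* e.g. an ssn of 0x1fe is compared as 0xfe against next_reclaimed */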

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
	}

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};
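	/*
	 * Note: frame_limit is the scheduler window for the queue; using
	 * the negotiated BA buffer size here keeps the hardware from
	 * sending more frames than the peer's reorder buffer can hold.
	 */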

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different than the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		/* This is a valid situation for IGTK */
		if (sta_id == IWL_MVM_INVALID_STA)
			return 0;

		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
3407*4882a593Smuzhiyun igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3408*4882a593Smuzhiyun ((u64) pn[4] << 8) |
3409*4882a593Smuzhiyun ((u64) pn[3] << 16) |
3410*4882a593Smuzhiyun ((u64) pn[2] << 24) |
3411*4882a593Smuzhiyun ((u64) pn[1] << 32) |
3412*4882a593Smuzhiyun ((u64) pn[0] << 40));
3413*4882a593Smuzhiyun }
3414*4882a593Smuzhiyun
3415*4882a593Smuzhiyun IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3416*4882a593Smuzhiyun remove_key ? "removing" : "installing",
3417*4882a593Smuzhiyun igtk_cmd.sta_id);
3418*4882a593Smuzhiyun
3419*4882a593Smuzhiyun if (!iwl_mvm_has_new_rx_api(mvm)) {
3420*4882a593Smuzhiyun struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3421*4882a593Smuzhiyun .ctrl_flags = igtk_cmd.ctrl_flags,
3422*4882a593Smuzhiyun .key_id = igtk_cmd.key_id,
3423*4882a593Smuzhiyun .sta_id = igtk_cmd.sta_id,
3424*4882a593Smuzhiyun .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3425*4882a593Smuzhiyun };
3426*4882a593Smuzhiyun
3427*4882a593Smuzhiyun memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3428*4882a593Smuzhiyun ARRAY_SIZE(igtk_cmd_v1.igtk));
3429*4882a593Smuzhiyun return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3430*4882a593Smuzhiyun sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3431*4882a593Smuzhiyun }
3432*4882a593Smuzhiyun return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3433*4882a593Smuzhiyun sizeof(igtk_cmd), &igtk_cmd);
3434*4882a593Smuzhiyun }
3435*4882a593Smuzhiyun
3436*4882a593Smuzhiyun
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

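/*
 * Common worker for installing a unicast or multicast key: resolve the
 * firmware station ID, and for TKIP additionally derive the RX phase-1
 * key from mac80211 before sending the key to the firmware.
 */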
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}

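/*
 * Install a key for a station (or for the AP's multicast station).
 * An illustrative call, assuming a hypothetical caller that lets the
 * driver pick the key slot (a sketch, not an actual call site):
 *
 *	ret = iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
 *				  STA_KEY_IDX_INVALID);
 *
 * Passing STA_KEY_IDX_INVALID makes this function allocate a free slot
 * in mvm->fw_key_table; a concrete offset is only passed in when an
 * existing index must be reused (HW restart, D3 entry).
 */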
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

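/*
 * Counterpart of iwl_mvm_set_sta_key(): invalidate the key in the
 * firmware and release its slot in mvm->fw_key_table. WEP keys were
 * uploaded twice (once per unicast/multicast role), so they are also
 * removed twice.
 */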
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

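/*
 * Tell the firmware how many frames it may release to a sleeping client
 * during a U-APSD service period or after a PS-Poll. When all released
 * frames come from a single station queue, the count is first clamped to
 * what is actually queued, so the firmware can end the service period at
 * the right frame.
 */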
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(sleep_tx_count == 0)) {
			/* nothing to release: end the service period now */
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

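/*
 * Block or unblock TX to every station that belongs to the given MAC
 * context (e.g. around a channel-switch quiet period). The internal
 * multicast and broadcast stations are not in fw_id_to_mac_id, so they
 * are handled explicitly afterwards.
 */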
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

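/*
 * Number of frames still queued for a TID: the sequence-number distance
 * between the next SN to be used and the next SN to be reclaimed. For
 * example (hypothetical values), sn = 0x14 with next_reclaimed = 0x10
 * gives ieee80211_sn_sub(0x14, 0x10) = 4 frames in flight.
 */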
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->trans_cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}

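/*
 * Add an internal station for a PASN (pre-association security
 * negotiation) ranging peer: allocate a station entry and a best-effort
 * queue, then program the peer's key with MFP set. On failure the
 * internal station is deallocated again.
 */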
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
			 u8 *key, u32 key_len)
{
	int ret;
	u16 queue;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_key_conf *keyconf;

	ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_LINK);
	if (ret)
		return ret;

	ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					     addr, sta, &queue,
					     IWL_MVM_TX_FIFO_BE);
	if (ret)
		goto out;

	keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
	if (!keyconf) {
		ret = -ENOBUFS;
		goto out;
	}

	keyconf->cipher = cipher;
	memcpy(keyconf->key, key, key_len);
	keyconf->keylen = key_len;

	ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
				   0, NULL, 0, 0, true);
	kfree(keyconf);
	/* don't report success if programming the key failed */
	if (ret)
		goto out;

	return 0;
out:
	iwl_mvm_dealloc_int_sta(mvm, sta);
	return ret;
}