/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>

#include "mvm.h"
#include "fw/api/scan.h"
#include "iwl-io.h"

#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1

#define IWL_SCAN_DWELL_ACTIVE 10
#define IWL_SCAN_DWELL_PASSIVE 110
#define IWL_SCAN_DWELL_FRAGMENTED 44
#define IWL_SCAN_DWELL_EXTENDED 90
#define IWL_SCAN_NUM_OF_FRAGS 3
#define IWL_SCAN_LAST_2_4_CHN 14

/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
/* adaptive dwell max budget time [TU] for directed scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
/* adaptive dwell default high band APs number */
#define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8
/* adaptive dwell default low band APs number */
#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
/* adaptive dwell default APs number in social channels (1, 6, 11) */
#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
/* number of scan channels */
#define IWL_SCAN_NUM_CHANNELS 112
/* adaptive dwell number of APs override mask for p2p friendly GO */
#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
/* adaptive dwell number of APs override mask for social channels */
#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
/* adaptive dwell number of APs override for p2p friendly GO channels */
#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
/* adaptive dwell number of APs override for social channels */
#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2

struct iwl_mvm_scan_timing_params {
	u32 suspend_time;
	u32 max_out_time;
};

static struct iwl_mvm_scan_timing_params scan_timing[] = {
	[IWL_SCAN_TYPE_UNASSOC] = {
		.suspend_time = 0,
		.max_out_time = 0,
	},
	[IWL_SCAN_TYPE_WILD] = {
		.suspend_time = 30,
		.max_out_time = 120,
	},
	[IWL_SCAN_TYPE_MILD] = {
		.suspend_time = 120,
		.max_out_time = 120,
	},
	[IWL_SCAN_TYPE_FRAGMENTED] = {
		.suspend_time = 95,
		.max_out_time = 44,
	},
	[IWL_SCAN_TYPE_FAST_BALANCE] = {
		.suspend_time = 30,
		.max_out_time = 37,
	},
};

struct iwl_mvm_scan_params {
	/* For CDB this is low band scan type, for non-CDB - type. */
	enum iwl_mvm_scan_type type;
	enum iwl_mvm_scan_type hb_type;
	u32 n_channels;
	u16 delay;
	int n_ssids;
	struct cfg80211_ssid *ssids;
	struct ieee80211_channel **channels;
	u32 flags;
	u8 *mac_addr;
	u8 *mac_addr_mask;
	bool no_cck;
	bool pass_all;
	int n_match_sets;
	struct iwl_scan_probe_req preq;
	struct cfg80211_match_set *match_sets;
	int n_scan_plans;
	struct cfg80211_sched_scan_plan *scan_plans;
	bool iter_notif;
};

static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
{
	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;

	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
		return (void *)&cmd->v8.data;

	if (iwl_mvm_is_adaptive_dwell_supported(mvm))
		return (void *)&cmd->v7.data;

	if (iwl_mvm_cdb_scan_api(mvm))
		return (void *)&cmd->v6.data;

	return (void *)&cmd->v1.data;
}

static inline struct iwl_scan_umac_chan_param *
iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm)
{
	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;

	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
		return &cmd->v8.channel;

	if (iwl_mvm_is_adaptive_dwell_supported(mvm))
		return &cmd->v7.channel;

	if (iwl_mvm_cdb_scan_api(mvm))
		return &cmd->v6.channel;

	return &cmd->v1.channel;
}

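/*
 * Pick the RX antenna mask used while scanning: the value configured in
 * mvm->scan_rx_ant if one was set, otherwise all valid RX antennas.
 */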
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
	if (mvm->scan_rx_ant != ANT_NONE)
		return mvm->scan_rx_ant;
	return iwl_mvm_get_valid_rx_ant(mvm);
}

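/*
 * Build the PHY RX chain selection field for scan commands, forcing the
 * driver-selected scan RX antennas.
 */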
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
	u16 rx_chain;
	u8 rx_ant;

	rx_ant = iwl_mvm_scan_rx_ant(mvm);
	rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
	return cpu_to_le16(rx_chain);
}

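/*
 * Choose the TX rate and antenna for scan probe requests: 1 Mbps CCK on
 * 2.4 GHz (unless CCK is disallowed), 6 Mbps OFDM otherwise, toggling the
 * TX antenna between scans.
 */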
static inline __le32
iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
			  bool no_cck)
{
	u32 tx_ant;

	iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx);
	tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;

	if (band == NL80211_BAND_2GHZ && !no_cck)
		return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
				   tx_ant);
	else
		return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
}

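/* Count active interfaces (except the P2P device) bound to a PHY context. */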
static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int *global_cnt = data;

	if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
	    mvmvif->phy_ctxt->id < NUM_PHY_CTX)
		*global_cnt += 1;
}

static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
{
	return mvm->tcm.result.global_load;
}

static enum iwl_mvm_traffic_load
iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
{
	return mvm->tcm.result.band_load[band];
}

struct iwl_is_dcm_with_go_iterator_data {
	struct ieee80211_vif *current_vif;
	bool is_dcm_with_p2p_go;
};

static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_is_dcm_with_go_iterator_data *data = _data;
	struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_vif *curr_mvmvif =
		iwl_mvm_vif_from_mac80211(data->current_vif);

	/* exclude the given vif */
	if (vif == data->current_vif)
		return;

	if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
	    other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
	    other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
		data->is_dcm_with_p2p_go = true;
}

static enum
iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 enum iwl_mvm_traffic_load load,
					 bool low_latency)
{
	int global_cnt = 0;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_scan_condition_iterator,
						   &global_cnt);
	if (!global_cnt)
		return IWL_SCAN_TYPE_UNASSOC;

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
		if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
		    (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
			return IWL_SCAN_TYPE_FRAGMENTED;

		/* in case of DCM with GO where BSS DTIM interval < 220msec
		 * set all scan requests as fast-balance scan
		 */
		if (vif && vif->type == NL80211_IFTYPE_STATION &&
		    vif->bss_conf.dtim_period < 220) {
			struct iwl_is_dcm_with_go_iterator_data data = {
				.current_vif = vif,
				.is_dcm_with_p2p_go = false,
			};

			ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mvm_is_dcm_with_go_iterator,
						&data);
			if (data.is_dcm_with_p2p_go)
				return IWL_SCAN_TYPE_FAST_BALANCE;
		}
	}

	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
		return IWL_SCAN_TYPE_MILD;

	return IWL_SCAN_TYPE_WILD;
}

static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif)
{
	enum iwl_mvm_traffic_load load;
	bool low_latency;

	load = iwl_mvm_get_traffic_load(mvm);
	low_latency = iwl_mvm_low_latency(mvm);

	return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}

static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
					     struct ieee80211_vif *vif,
					     enum nl80211_band band)
{
	enum iwl_mvm_traffic_load load;
	bool low_latency;

	load = iwl_mvm_get_traffic_load_band(mvm, band);
	low_latency = iwl_mvm_low_latency_band(mvm, band);

	return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}

static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
	/* require rrm scan whenever the fw supports it */
	return fw_has_capa(&mvm->fw->ucode_capa,
			   IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
}

static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
{
	int max_probe_len;

	max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;

	/* we create the 802.11 header and SSID element */
	max_probe_len -= 24 + 2;

	/* DS parameter set element is added on 2.4GHZ band if required */
	if (iwl_mvm_rrm_scan_needed(mvm))
		max_probe_len -= 3;

	return max_probe_len;
}

int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
{
	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);

	/* TODO: [BUG] This function should return the maximum allowed size of
	 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
	 * in the same command. So the correct implementation of this function
	 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
	 * command has only 512 bytes and it would leave us with about 240
	 * bytes for scan IEs, which is clearly not enough. So meanwhile
	 * we will report an incorrect value. This may result in a failure to
	 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
	 * functions with -ENOBUFS, if a large enough probe will be provided.
	 */
	return max_ie_len;
}

void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
					      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;

	IWL_DEBUG_SCAN(mvm,
		       "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
		       notif->status, notif->scanned_channels);

	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
		ieee80211_sched_scan_results(mvm->hw);
		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
	}
}

void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
	ieee80211_sched_scan_results(mvm->hw);
}

static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
{
	switch (status) {
	case IWL_SCAN_EBS_SUCCESS:
		return "successful";
	case IWL_SCAN_EBS_INACTIVE:
		return "inactive";
	case IWL_SCAN_EBS_FAILED:
	case IWL_SCAN_EBS_CHAN_NOT_FOUND:
	default:
		return "failed";
	}
}

void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
					 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
	bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);

	/* If this happens, the firmware has mistakenly sent an LMAC
	 * notification during UMAC scans -- warn and ignore it.
	 */
	if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
				     IWL_UCODE_TLV_CAPA_UMAC_SCAN)))
		return;

	/* scan status must be locked for proper checking */
	lockdep_assert_held(&mvm->mutex);

	/* We first check if we were stopping a scan, in which case we
	 * just clear the stopping flag. Then we check if it was a
	 * firmware initiated stop, in which case we need to inform
	 * mac80211.
	 * Note that we can have a stopping and a running scan
	 * simultaneously, but we can't have two different types of
	 * scans stopping or running at the same time (since LMAC
	 * doesn't support it).
	 */

	if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);

		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
			       aborted ? "aborted" : "completed",
			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
		IWL_DEBUG_SCAN(mvm,
			       "Last line %d, Last iteration %d, Time after last iteration %d\n",
			       scan_notif->last_schedule_line,
			       scan_notif->last_schedule_iteration,
			       __le32_to_cpu(scan_notif->time_after_last_iter));

		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
			       aborted ? "aborted" : "completed",
			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));

		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);

		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
			       aborted ? "aborted" : "completed",
			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
		IWL_DEBUG_SCAN(mvm,
			       "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
			       scan_notif->last_schedule_line,
			       scan_notif->last_schedule_iteration,
			       __le32_to_cpu(scan_notif->time_after_last_iter));

		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
		ieee80211_sched_scan_stopped(mvm->hw);
		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
	} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
		struct cfg80211_scan_info info = {
			.aborted = aborted,
		};

		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
			       aborted ? "aborted" : "completed",
			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));

		mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
		ieee80211_scan_completed(mvm->hw, &info);
		cancel_delayed_work(&mvm->scan_timeout_dwork);
		iwl_mvm_resume_tcm(mvm);
	} else {
		IWL_ERR(mvm,
			"got scan complete notification but no scan is running\n");
	}

	mvm->last_ebs_successful =
		scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
		scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
}

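/*
 * Return the index of an SSID already present in @ssid_list (the list is
 * terminated by a zero-length entry), or -1 if it is not there.
 */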
static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
{
	int i;

	for (i = 0; i < PROBE_OPTION_MAX; i++) {
		if (!ssid_list[i].len)
			break;
		if (ssid_list[i].len == ssid_len &&
		    !memcmp(ssid_list->ssid, ssid, ssid_len))
			return i;
	}
	return -1;
}

/* We insert the SSIDs in an inverted order, because the FW will
 * invert it back.
 */
static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
				 struct iwl_ssid_ie *ssids,
				 u32 *ssid_bitmap)
{
	int i, j;
	int index;
	u32 tmp_bitmap = 0;

	/*
	 * copy SSIDs from match list.
	 * iwl_config_sched_scan_profiles() uses the order of these ssids to
	 * config match list.
	 */
	for (i = 0, j = params->n_match_sets - 1;
	     j >= 0 && i < PROBE_OPTION_MAX;
	     i++, j--) {
		/* skip empty SSID matchsets */
		if (!params->match_sets[j].ssid.ssid_len)
			continue;
		ssids[i].id = WLAN_EID_SSID;
		ssids[i].len = params->match_sets[j].ssid.ssid_len;
		memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
		       ssids[i].len);
	}

	/* add SSIDs from scan SSID list */
	for (j = params->n_ssids - 1;
	     j >= 0 && i < PROBE_OPTION_MAX;
	     i++, j--) {
		index = iwl_ssid_exist(params->ssids[j].ssid,
				       params->ssids[j].ssid_len,
				       ssids);
		if (index < 0) {
			ssids[i].id = WLAN_EID_SSID;
			ssids[i].len = params->ssids[j].ssid_len;
			memcpy(ssids[i].ssid, params->ssids[j].ssid,
			       ssids[i].len);
			tmp_bitmap |= BIT(i);
		} else {
			tmp_bitmap |= BIT(index);
		}
	}
	if (ssid_bitmap)
		*ssid_bitmap = tmp_bitmap;
}

static int
iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
				   struct cfg80211_sched_scan_request *req)
{
	struct iwl_scan_offload_profile *profile;
	struct iwl_scan_offload_profile_cfg_v1 *profile_cfg_v1;
	struct iwl_scan_offload_blocklist *blocklist;
	struct iwl_scan_offload_profile_cfg_data *data;
	int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
	int profile_cfg_size = sizeof(*data) +
		sizeof(*profile) * max_profiles;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
		.len[1] = profile_cfg_size,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
		.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
	};
	int blocklist_len;
	int i;
	int ret;

	if (WARN_ON(req->n_match_sets > max_profiles))
		return -EIO;

	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
		blocklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
	else
		blocklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;

	blocklist = kcalloc(blocklist_len, sizeof(*blocklist), GFP_KERNEL);
	if (!blocklist)
		return -ENOMEM;

	profile_cfg_v1 = kzalloc(profile_cfg_size, GFP_KERNEL);
	if (!profile_cfg_v1) {
		ret = -ENOMEM;
		goto free_blocklist;
	}

	cmd.data[0] = blocklist;
	cmd.len[0] = sizeof(*blocklist) * blocklist_len;
	cmd.data[1] = profile_cfg_v1;

	/* if max_profile is MAX_PROFILES_V2, we have the new API */
	if (max_profiles == IWL_SCAN_MAX_PROFILES_V2) {
		struct iwl_scan_offload_profile_cfg *profile_cfg =
			(struct iwl_scan_offload_profile_cfg *)profile_cfg_v1;

		data = &profile_cfg->data;
	} else {
		data = &profile_cfg_v1->data;
	}

	/* No blocklist configuration */
	data->num_profiles = req->n_match_sets;
	data->active_clients = SCAN_CLIENT_SCHED_SCAN;
	data->pass_match = SCAN_CLIENT_SCHED_SCAN;
	data->match_notify = SCAN_CLIENT_SCHED_SCAN;

	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
		data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;

	for (i = 0; i < req->n_match_sets; i++) {
		profile = &profile_cfg_v1->profiles[i];
		profile->ssid_index = i;
		/* Support any cipher and auth algorithm */
		profile->unicast_cipher = 0xff;
		profile->auth_alg = 0xff;
		profile->network_type = IWL_NETWORK_TYPE_ANY;
		profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
		profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
	}

	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	kfree(profile_cfg_v1);
free_blocklist:
	kfree(blocklist);

	return ret;
}

static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
				  struct cfg80211_sched_scan_request *req)
{
	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
		IWL_DEBUG_SCAN(mvm,
			       "Sending scheduled scan with filtering, n_match_sets %d\n",
			       req->n_match_sets);
		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
		return false;
	}

	IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");

	mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
	return true;
}

static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_host_cmd cmd = {
		.id = SCAN_OFFLOAD_ABORT_CMD,
	};
	u32 status = CAN_ABORT_STATUS;

	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
	if (ret)
		return ret;

	if (status != CAN_ABORT_STATUS) {
		/*
		 * The scan abort will return 1 for success or
		 * 2 for "failure". A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is completed.
		 */
		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
		ret = -ENOENT;
	}

	return ret;
}

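/*
 * Fill the per-band TX commands (2.4 GHz and 5 GHz) used for scan probe
 * requests, including the aux station ID required by older firmware.
 */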
static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
				     struct iwl_scan_req_tx_cmd *tx_cmd,
				     bool no_cck)
{
	tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
					 TX_CMD_FLG_BT_DIS);
	tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
							   NL80211_BAND_2GHZ,
							   no_cck);

	if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
				  ADD_STA,
				  0) < 12) {
		tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
		tx_cmd[1].sta_id = mvm->aux_sta.sta_id;

	/*
	 * Fw doesn't use this sta anymore, pending deprecation via HOST API
	 * change
	 */
	} else {
		tx_cmd[0].sta_id = 0xff;
		tx_cmd[1].sta_id = 0xff;
	}

	tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
					 TX_CMD_FLG_BT_DIS);

	tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
							   NL80211_BAND_5GHZ,
							   no_cck);
}

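/*
 * Fill the LMAC scan command channel array: one iteration per channel,
 * tagged with the bitmap of SSIDs to probe.
 */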
static void
iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
			       struct ieee80211_channel **channels,
			       int n_channels, u32 ssid_bitmap,
			       struct iwl_scan_req_lmac *cmd)
{
	struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
	int i;

	for (i = 0; i < n_channels; i++) {
		channel_cfg[i].channel_num =
			cpu_to_le16(channels[i]->hw_value);
		channel_cfg[i].iter_count = cpu_to_le16(1);
		channel_cfg[i].iter_interval = 0;
		channel_cfg[i].flags =
			cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
				    ssid_bitmap);
	}
}

static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
					   size_t len, u8 *const pos)
{
	static const u8 before_ds_params[] = {
		WLAN_EID_SSID,
		WLAN_EID_SUPP_RATES,
		WLAN_EID_REQUEST,
		WLAN_EID_EXT_SUPP_RATES,
	};
	size_t offs;
	u8 *newpos = pos;

	if (!iwl_mvm_rrm_scan_needed(mvm)) {
		memcpy(newpos, ies, len);
		return newpos + len;
	}

	offs = ieee80211_ie_split(ies, len,
				  before_ds_params,
				  ARRAY_SIZE(before_ds_params),
				  0);

	memcpy(newpos, ies, offs);
	newpos += offs;

	/* Add a placeholder for DS Parameter Set element */
	*newpos++ = WLAN_EID_DS_PARAMS;
	*newpos++ = 1;
	*newpos++ = 0;

	memcpy(newpos, ies + offs, len - offs);
	newpos += len - offs;

	return newpos;
}

#define WFA_TPC_IE_LEN 9

static void iwl_mvm_add_tpc_report_ie(u8 *pos)
{
	pos[0] = WLAN_EID_VENDOR_SPECIFIC;
	pos[1] = WFA_TPC_IE_LEN - 2;
	pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
	pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
	pos[4] = WLAN_OUI_MICROSOFT & 0xff;
	pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
	pos[6] = 0;
	/* pos[7] - tx power will be inserted by the FW */
	pos[7] = 0;
	pos[8] = 0;
}

static void
iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct ieee80211_scan_ies *ies,
			 struct iwl_mvm_scan_params *params)
{
	struct ieee80211_mgmt *frame = (void *)params->preq.buf;
	u8 *pos, *newpos;
	const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
		params->mac_addr : NULL;

	/*
	 * Unfortunately, right now the offload scan doesn't support randomising
	 * within the firmware, so until the firmware API is ready we implement
	 * it in the driver. This means that the scan iterations won't really be
	 * random, only when it's restarted, but at least that helps a bit.
	 */
	if (mac_addr)
		get_random_mask_addr(frame->sa, mac_addr,
				     params->mac_addr_mask);
	else
		memcpy(frame->sa, vif->addr, ETH_ALEN);

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	eth_broadcast_addr(frame->da);
	eth_broadcast_addr(frame->bssid);
	frame->seq_ctrl = 0;

	pos = frame->u.probe_req.variable;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;

	params->preq.mac_header.offset = 0;
	params->preq.mac_header.len = cpu_to_le16(24 + 2);

	/* Insert ds parameter set element on 2.4 GHz band */
	newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
						 ies->ies[NL80211_BAND_2GHZ],
						 ies->len[NL80211_BAND_2GHZ],
						 pos);
	params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
	params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
	pos = newpos;

	memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
	       ies->len[NL80211_BAND_5GHZ]);
	params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
	params->preq.band_data[1].len =
		cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
	pos += ies->len[NL80211_BAND_5GHZ];

	memcpy(pos, ies->common_ies, ies->common_ie_len);
	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);

	if (iwl_mvm_rrm_scan_needed(mvm) &&
	    !fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
		iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
							   WFA_TPC_IE_LEN);
	} else {
		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
	}
}

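/* Program the LMAC scan dwell and out-of-channel timing from the scan type. */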
static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
				    struct iwl_scan_req_lmac *cmd,
				    struct iwl_mvm_scan_params *params)
{
	cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
	cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
	cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
	cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}

static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
				     struct ieee80211_scan_ies *ies,
				     int n_channels)
{
	return ((n_ssids <= PROBE_OPTION_MAX) &&
		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &
		(ies->common_ie_len +
		 ies->len[NL80211_BAND_2GHZ] +
		 ies->len[NL80211_BAND_5GHZ] <=
		 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
}

static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif)
{
	const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
	bool low_latency;

	if (iwl_mvm_is_cdb_supported(mvm))
		low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ);
	else
		low_latency = iwl_mvm_low_latency(mvm);

	/* We can only use EBS if:
	 *	1. the feature is supported;
	 *	2. the last EBS was successful;
	 *	3. if only single scan, the single scan EBS API is supported;
	 *	4. it's not a p2p find operation.
	 *	5. we are not in low latency mode,
	 *	   or if fragmented ebs is supported by the FW
	 */
	return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
		mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
		vif->type != NL80211_IFTYPE_P2P_DEVICE &&
		(!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)));
}

static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
{
	return params->n_scan_plans == 1 &&
		params->scan_plans[0].iterations == 1;
}

static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
{
	return (type == IWL_SCAN_TYPE_FRAGMENTED ||
		type == IWL_SCAN_TYPE_FAST_BALANCE);
}

static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
				   struct iwl_mvm_scan_params *params,
				   struct ieee80211_vif *vif)
{
	int flags = 0;

	if (params->n_ssids == 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;

	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;

	if (iwl_mvm_is_scan_fragmented(params->type))
		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;

	if (iwl_mvm_rrm_scan_needed(mvm) &&
	    fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
		flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;

	if (params->pass_all)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
	else
		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (mvm->scan_iter_notif_enabled)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
#endif

	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;

	if (iwl_mvm_is_regular_scan(params) &&
	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
	    !iwl_mvm_is_scan_fragmented(params->type))
		flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;

	return flags;
}

static void
iwl_mvm_scan_set_legacy_probe_req(struct iwl_scan_probe_req_v1 *p_req,
				  struct iwl_scan_probe_req *src_p_req)
{
	int i;

	p_req->mac_header = src_p_req->mac_header;
	for (i = 0; i < SCAN_NUM_BAND_PROBE_DATA_V_1; i++)
		p_req->band_data[i] = src_p_req->band_data[i];
	p_req->common_data = src_p_req->common_data;
	memcpy(p_req->buf, src_p_req->buf, sizeof(p_req->buf));
}

static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct iwl_mvm_scan_params *params)
{
	struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
	struct iwl_scan_probe_req_v1 *preq =
		(void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
			 mvm->fw->ucode_capa.n_scan_channels);
	u32 ssid_bitmap = 0;
	int i;
	u8 band;

	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
		return -EINVAL;

	iwl_mvm_scan_lmac_dwell(mvm, cmd, params);

	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
	cmd->iter_num = cpu_to_le32(1);
	cmd->n_channels = (u8)params->n_channels;

	cmd->delay = cpu_to_le32(params->delay);

	cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
							      vif));

	band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band);
	cmd->flags = cpu_to_le32(band);
	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
					MAC_FILTER_IN_BEACON);
	iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
	iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);

	/* this API uses bits 1-20 instead of 0-19 */
	ssid_bitmap <<= 1;

	for (i = 0; i < params->n_scan_plans; i++) {
		struct cfg80211_sched_scan_plan *scan_plan =
			&params->scan_plans[i];

		cmd->schedule[i].delay =
			cpu_to_le16(scan_plan->interval);
		cmd->schedule[i].iterations = scan_plan->iterations;
		cmd->schedule[i].full_scan_mul = 1;
	}

	/*
	 * If the number of iterations of the last scan plan is set to
	 * zero, it should run infinitely. However, this is not always the case.
	 * For example, when regular scan is requested the driver sets one scan
	 * plan with one iteration.
	 */
	if (!cmd->schedule[i - 1].iterations)
		cmd->schedule[i - 1].iterations = 0xff;

	if (iwl_mvm_scan_use_ebs(mvm, vif)) {
		cmd->channel_opt[0].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[0].non_ebs_ratio =
			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
		cmd->channel_opt[1].flags =
			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
		cmd->channel_opt[1].non_ebs_ratio =
			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
	}

	iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
				       params->n_channels, ssid_bitmap, cmd);

	iwl_mvm_scan_set_legacy_probe_req(preq, &params->preq);

	return 0;
}

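/* Map a legacy rate index to the corresponding SCAN_CONFIG_RATE_* flag. */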
rate_to_scan_rate_flag(unsigned int rate)1054*4882a593Smuzhiyun static int rate_to_scan_rate_flag(unsigned int rate)
1055*4882a593Smuzhiyun {
1056*4882a593Smuzhiyun static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
1057*4882a593Smuzhiyun [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
1058*4882a593Smuzhiyun [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
1059*4882a593Smuzhiyun [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
1060*4882a593Smuzhiyun [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
1061*4882a593Smuzhiyun [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
1062*4882a593Smuzhiyun [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
1063*4882a593Smuzhiyun [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
1064*4882a593Smuzhiyun [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
1065*4882a593Smuzhiyun [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
1066*4882a593Smuzhiyun [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
1067*4882a593Smuzhiyun [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
1068*4882a593Smuzhiyun [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
1069*4882a593Smuzhiyun };
1070*4882a593Smuzhiyun
1071*4882a593Smuzhiyun return rate_to_scan_rate[rate];
1072*4882a593Smuzhiyun }
1073*4882a593Smuzhiyun
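/*
 * Build the legacy-rates bitmap for the scan config: OR together the
 * SCAN_CONFIG_RATE_* flags of every bitrate supported on 2.4 and 5 GHz,
 * then mirror the result into the "supported rates" field as well.
 */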
1074*4882a593Smuzhiyun static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
1075*4882a593Smuzhiyun {
1076*4882a593Smuzhiyun struct ieee80211_supported_band *band;
1077*4882a593Smuzhiyun unsigned int rates = 0;
1078*4882a593Smuzhiyun int i;
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
1081*4882a593Smuzhiyun for (i = 0; i < band->n_bitrates; i++)
1082*4882a593Smuzhiyun rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1083*4882a593Smuzhiyun band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1084*4882a593Smuzhiyun for (i = 0; i < band->n_bitrates; i++)
1085*4882a593Smuzhiyun rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1086*4882a593Smuzhiyun
1087*4882a593Smuzhiyun /* Set both basic rates and supported rates */
1088*4882a593Smuzhiyun rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun return cpu_to_le32(rates);
1091*4882a593Smuzhiyun }
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
1094*4882a593Smuzhiyun struct iwl_scan_dwell *dwell)
1095*4882a593Smuzhiyun {
1096*4882a593Smuzhiyun dwell->active = IWL_SCAN_DWELL_ACTIVE;
1097*4882a593Smuzhiyun dwell->passive = IWL_SCAN_DWELL_PASSIVE;
1098*4882a593Smuzhiyun dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
1099*4882a593Smuzhiyun dwell->extended = IWL_SCAN_DWELL_EXTENDED;
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun
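/*
 * Fill the scan-config channel array with the HW channel numbers of all
 * 2.4 GHz channels followed by all 5 GHz channels, capped at
 * max_channels entries.
 */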
1102*4882a593Smuzhiyun static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels,
1103*4882a593Smuzhiyun u32 max_channels)
1104*4882a593Smuzhiyun {
1105*4882a593Smuzhiyun struct ieee80211_supported_band *band;
1106*4882a593Smuzhiyun int i, j = 0;
1107*4882a593Smuzhiyun
1108*4882a593Smuzhiyun band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
1109*4882a593Smuzhiyun for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
1110*4882a593Smuzhiyun channels[j] = band->channels[i].hw_value;
1111*4882a593Smuzhiyun band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1112*4882a593Smuzhiyun for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
1113*4882a593Smuzhiyun channels[j] = band->channels[i].hw_value;
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun
1116*4882a593Smuzhiyun static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
1117*4882a593Smuzhiyun u32 flags, u8 channel_flags,
1118*4882a593Smuzhiyun u32 max_channels)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
1121*4882a593Smuzhiyun struct iwl_scan_config_v1 *cfg = config;
1122*4882a593Smuzhiyun
1123*4882a593Smuzhiyun cfg->flags = cpu_to_le32(flags);
1124*4882a593Smuzhiyun cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1125*4882a593Smuzhiyun cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1126*4882a593Smuzhiyun cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1127*4882a593Smuzhiyun cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
1128*4882a593Smuzhiyun cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
1129*4882a593Smuzhiyun
1130*4882a593Smuzhiyun iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
1131*4882a593Smuzhiyun
1132*4882a593Smuzhiyun memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyun /* This function should not be called when using ADD_STA ver >=12 */
1135*4882a593Smuzhiyun WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
1136*4882a593Smuzhiyun ADD_STA, 0) >= 12);
1137*4882a593Smuzhiyun
1138*4882a593Smuzhiyun cfg->bcast_sta_id = mvm->aux_sta.sta_id;
1139*4882a593Smuzhiyun cfg->channel_flags = channel_flags;
1140*4882a593Smuzhiyun
1141*4882a593Smuzhiyun iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
1142*4882a593Smuzhiyun }
1143*4882a593Smuzhiyun
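/*
 * v2 of the scan config differs from v1 mainly in that the out-of-channel
 * and suspend times are per LMAC, so on CDB hardware the low and high
 * bands can use the timings of different scan types.
 */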
1144*4882a593Smuzhiyun static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
1145*4882a593Smuzhiyun u32 flags, u8 channel_flags,
1146*4882a593Smuzhiyun u32 max_channels)
1147*4882a593Smuzhiyun {
1148*4882a593Smuzhiyun struct iwl_scan_config_v2 *cfg = config;
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun cfg->flags = cpu_to_le32(flags);
1151*4882a593Smuzhiyun cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1152*4882a593Smuzhiyun cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1153*4882a593Smuzhiyun cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1154*4882a593Smuzhiyun
1155*4882a593Smuzhiyun if (iwl_mvm_is_cdb_supported(mvm)) {
1156*4882a593Smuzhiyun enum iwl_mvm_scan_type lb_type, hb_type;
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1159*4882a593Smuzhiyun NL80211_BAND_2GHZ);
1160*4882a593Smuzhiyun hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1161*4882a593Smuzhiyun NL80211_BAND_5GHZ);
1162*4882a593Smuzhiyun
1163*4882a593Smuzhiyun cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
1164*4882a593Smuzhiyun cpu_to_le32(scan_timing[lb_type].max_out_time);
1165*4882a593Smuzhiyun cfg->suspend_time[SCAN_LB_LMAC_IDX] =
1166*4882a593Smuzhiyun cpu_to_le32(scan_timing[lb_type].suspend_time);
1167*4882a593Smuzhiyun
1168*4882a593Smuzhiyun cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] =
1169*4882a593Smuzhiyun cpu_to_le32(scan_timing[hb_type].max_out_time);
1170*4882a593Smuzhiyun cfg->suspend_time[SCAN_HB_LMAC_IDX] =
1171*4882a593Smuzhiyun cpu_to_le32(scan_timing[hb_type].suspend_time);
1172*4882a593Smuzhiyun } else {
1173*4882a593Smuzhiyun enum iwl_mvm_scan_type type =
1174*4882a593Smuzhiyun iwl_mvm_get_scan_type(mvm, NULL);
1175*4882a593Smuzhiyun
1176*4882a593Smuzhiyun cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
1177*4882a593Smuzhiyun cpu_to_le32(scan_timing[type].max_out_time);
1178*4882a593Smuzhiyun cfg->suspend_time[SCAN_LB_LMAC_IDX] =
1179*4882a593Smuzhiyun cpu_to_le32(scan_timing[type].suspend_time);
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun /* This function should not be called when using ADD_STA ver >=12 */
1187*4882a593Smuzhiyun WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
1188*4882a593Smuzhiyun ADD_STA, 0) >= 12);
1189*4882a593Smuzhiyun
1190*4882a593Smuzhiyun cfg->bcast_sta_id = mvm->aux_sta.sta_id;
1191*4882a593Smuzhiyun cfg->channel_flags = channel_flags;
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
1194*4882a593Smuzhiyun }
1195*4882a593Smuzhiyun
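/*
 * Build and send SCAN_CFG_CMD for firmware without the reduced scan
 * config: pick the v1 or v2 layout based on the CDB scan API, and skip
 * the command entirely if the effective scan type(s) did not change.
 */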
1196*4882a593Smuzhiyun static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
1197*4882a593Smuzhiyun {
1198*4882a593Smuzhiyun void *cfg;
1199*4882a593Smuzhiyun int ret, cmd_size;
1200*4882a593Smuzhiyun struct iwl_host_cmd cmd = {
1201*4882a593Smuzhiyun .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
1202*4882a593Smuzhiyun };
1203*4882a593Smuzhiyun enum iwl_mvm_scan_type type;
1204*4882a593Smuzhiyun enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
1205*4882a593Smuzhiyun int num_channels =
1206*4882a593Smuzhiyun mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
1207*4882a593Smuzhiyun mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
1208*4882a593Smuzhiyun u32 flags;
1209*4882a593Smuzhiyun u8 channel_flags;
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
1212*4882a593Smuzhiyun num_channels = mvm->fw->ucode_capa.n_scan_channels;
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun if (iwl_mvm_is_cdb_supported(mvm)) {
1215*4882a593Smuzhiyun type = iwl_mvm_get_scan_type_band(mvm, NULL,
1216*4882a593Smuzhiyun NL80211_BAND_2GHZ);
1217*4882a593Smuzhiyun hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1218*4882a593Smuzhiyun NL80211_BAND_5GHZ);
1219*4882a593Smuzhiyun if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
1220*4882a593Smuzhiyun return 0;
1221*4882a593Smuzhiyun } else {
1222*4882a593Smuzhiyun type = iwl_mvm_get_scan_type(mvm, NULL);
1223*4882a593Smuzhiyun if (type == mvm->scan_type)
1224*4882a593Smuzhiyun return 0;
1225*4882a593Smuzhiyun }
1226*4882a593Smuzhiyun
1227*4882a593Smuzhiyun if (iwl_mvm_cdb_scan_api(mvm))
1228*4882a593Smuzhiyun cmd_size = sizeof(struct iwl_scan_config_v2);
1229*4882a593Smuzhiyun else
1230*4882a593Smuzhiyun cmd_size = sizeof(struct iwl_scan_config_v1);
1231*4882a593Smuzhiyun cmd_size += mvm->fw->ucode_capa.n_scan_channels;
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun cfg = kzalloc(cmd_size, GFP_KERNEL);
1234*4882a593Smuzhiyun if (!cfg)
1235*4882a593Smuzhiyun return -ENOMEM;
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun flags = SCAN_CONFIG_FLAG_ACTIVATE |
1238*4882a593Smuzhiyun SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
1239*4882a593Smuzhiyun SCAN_CONFIG_FLAG_SET_TX_CHAINS |
1240*4882a593Smuzhiyun SCAN_CONFIG_FLAG_SET_RX_CHAINS |
1241*4882a593Smuzhiyun SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
1242*4882a593Smuzhiyun SCAN_CONFIG_FLAG_SET_ALL_TIMES |
1243*4882a593Smuzhiyun SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
1244*4882a593Smuzhiyun SCAN_CONFIG_FLAG_SET_MAC_ADDR |
1245*4882a593Smuzhiyun SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
1246*4882a593Smuzhiyun SCAN_CONFIG_N_CHANNELS(num_channels) |
1247*4882a593Smuzhiyun (iwl_mvm_is_scan_fragmented(type) ?
1248*4882a593Smuzhiyun SCAN_CONFIG_FLAG_SET_FRAGMENTED :
1249*4882a593Smuzhiyun SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun channel_flags = IWL_CHANNEL_FLAG_EBS |
1252*4882a593Smuzhiyun IWL_CHANNEL_FLAG_ACCURATE_EBS |
1253*4882a593Smuzhiyun IWL_CHANNEL_FLAG_EBS_ADD |
1254*4882a593Smuzhiyun IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun /*
1257*4882a593Smuzhiyun * Check for fragmented scan on LMAC2 - high band.
1258*4882a593Smuzhiyun * LMAC1 - low band is checked above.
1259*4882a593Smuzhiyun */
1260*4882a593Smuzhiyun if (iwl_mvm_cdb_scan_api(mvm)) {
1261*4882a593Smuzhiyun if (iwl_mvm_is_cdb_supported(mvm))
1262*4882a593Smuzhiyun flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
1263*4882a593Smuzhiyun SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
1264*4882a593Smuzhiyun SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
1265*4882a593Smuzhiyun iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
1266*4882a593Smuzhiyun num_channels);
1267*4882a593Smuzhiyun } else {
1268*4882a593Smuzhiyun iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
1269*4882a593Smuzhiyun num_channels);
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun cmd.data[0] = cfg;
1273*4882a593Smuzhiyun cmd.len[0] = cmd_size;
1274*4882a593Smuzhiyun cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun ret = iwl_mvm_send_cmd(mvm, &cmd);
1279*4882a593Smuzhiyun if (!ret) {
1280*4882a593Smuzhiyun mvm->scan_type = type;
1281*4882a593Smuzhiyun mvm->hb_scan_type = hb_type;
1282*4882a593Smuzhiyun }
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun kfree(cfg);
1285*4882a593Smuzhiyun return ret;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun
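/*
 * Entry point for scan configuration. Firmware with the reduced scan
 * config only needs the TX/RX chains and (for older ADD_STA versions)
 * the broadcast station id; everything else goes through the legacy
 * path above.
 */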
1288*4882a593Smuzhiyun int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1289*4882a593Smuzhiyun {
1290*4882a593Smuzhiyun struct iwl_scan_config cfg;
1291*4882a593Smuzhiyun struct iwl_host_cmd cmd = {
1292*4882a593Smuzhiyun .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
1293*4882a593Smuzhiyun .len[0] = sizeof(cfg),
1294*4882a593Smuzhiyun .data[0] = &cfg,
1295*4882a593Smuzhiyun .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1296*4882a593Smuzhiyun };
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
1299*4882a593Smuzhiyun return iwl_mvm_legacy_config_scan(mvm);
1300*4882a593Smuzhiyun
1301*4882a593Smuzhiyun memset(&cfg, 0, sizeof(cfg));
1302*4882a593Smuzhiyun
1303*4882a593Smuzhiyun if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
1304*4882a593Smuzhiyun ADD_STA, 0) < 12)
1305*4882a593Smuzhiyun cfg.bcast_sta_id = mvm->aux_sta.sta_id;
1306*4882a593Smuzhiyun /*
1307*4882a593Smuzhiyun * The FW doesn't use this STA anymore; it is pending deprecation via a
1308*4882a593Smuzhiyun * host API change.
1309*4882a593Smuzhiyun */
1310*4882a593Smuzhiyun else
1311*4882a593Smuzhiyun cfg.bcast_sta_id = 0xff;
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1314*4882a593Smuzhiyun cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1315*4882a593Smuzhiyun
1316*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1317*4882a593Smuzhiyun
1318*4882a593Smuzhiyun return iwl_mvm_send_cmd(mvm, &cmd);
1319*4882a593Smuzhiyun }
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
1322*4882a593Smuzhiyun {
1323*4882a593Smuzhiyun int i;
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun for (i = 0; i < mvm->max_scans; i++)
1326*4882a593Smuzhiyun if (mvm->scan_uid_status[i] == status)
1327*4882a593Smuzhiyun return i;
1328*4882a593Smuzhiyun
1329*4882a593Smuzhiyun return -ENOENT;
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun
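/*
 * Fill the dwell/timing fields of the UMAC scan request in the right
 * union member (v1/v6/v7/v8) depending on which of adaptive dwell,
 * adaptive dwell v2, CDB and the CDB scan API the firmware supports.
 */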
1332*4882a593Smuzhiyun static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1333*4882a593Smuzhiyun struct iwl_scan_req_umac *cmd,
1334*4882a593Smuzhiyun struct iwl_mvm_scan_params *params)
1335*4882a593Smuzhiyun {
1336*4882a593Smuzhiyun struct iwl_mvm_scan_timing_params *timing, *hb_timing;
1337*4882a593Smuzhiyun u8 active_dwell, passive_dwell;
1338*4882a593Smuzhiyun
1339*4882a593Smuzhiyun timing = &scan_timing[params->type];
1340*4882a593Smuzhiyun active_dwell = IWL_SCAN_DWELL_ACTIVE;
1341*4882a593Smuzhiyun passive_dwell = IWL_SCAN_DWELL_PASSIVE;
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
1344*4882a593Smuzhiyun cmd->v7.adwell_default_n_aps_social =
1345*4882a593Smuzhiyun IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
1346*4882a593Smuzhiyun cmd->v7.adwell_default_n_aps =
1347*4882a593Smuzhiyun IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun if (iwl_mvm_is_adwell_hb_ap_num_supported(mvm))
1350*4882a593Smuzhiyun cmd->v9.adwell_default_hb_n_aps =
1351*4882a593Smuzhiyun IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun /* if a custom max budget was configured via debugfs */
1354*4882a593Smuzhiyun if (IWL_MVM_ADWELL_MAX_BUDGET)
1355*4882a593Smuzhiyun cmd->v7.adwell_max_budget =
1356*4882a593Smuzhiyun cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
1357*4882a593Smuzhiyun else if (params->ssids && params->ssids[0].ssid_len)
1358*4882a593Smuzhiyun cmd->v7.adwell_max_budget =
1359*4882a593Smuzhiyun cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
1360*4882a593Smuzhiyun else
1361*4882a593Smuzhiyun cmd->v7.adwell_max_budget =
1362*4882a593Smuzhiyun cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
1363*4882a593Smuzhiyun
1364*4882a593Smuzhiyun cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1365*4882a593Smuzhiyun cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
1366*4882a593Smuzhiyun cpu_to_le32(timing->max_out_time);
1367*4882a593Smuzhiyun cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
1368*4882a593Smuzhiyun cpu_to_le32(timing->suspend_time);
1369*4882a593Smuzhiyun
1370*4882a593Smuzhiyun if (iwl_mvm_is_cdb_supported(mvm)) {
1371*4882a593Smuzhiyun hb_timing = &scan_timing[params->hb_type];
1372*4882a593Smuzhiyun
1373*4882a593Smuzhiyun cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
1374*4882a593Smuzhiyun cpu_to_le32(hb_timing->max_out_time);
1375*4882a593Smuzhiyun cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
1376*4882a593Smuzhiyun cpu_to_le32(hb_timing->suspend_time);
1377*4882a593Smuzhiyun }
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
1380*4882a593Smuzhiyun cmd->v7.active_dwell = active_dwell;
1381*4882a593Smuzhiyun cmd->v7.passive_dwell = passive_dwell;
1382*4882a593Smuzhiyun cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
1383*4882a593Smuzhiyun } else {
1384*4882a593Smuzhiyun cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
1385*4882a593Smuzhiyun cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
1386*4882a593Smuzhiyun if (iwl_mvm_is_cdb_supported(mvm)) {
1387*4882a593Smuzhiyun cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] =
1388*4882a593Smuzhiyun active_dwell;
1389*4882a593Smuzhiyun cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] =
1390*4882a593Smuzhiyun passive_dwell;
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun }
1393*4882a593Smuzhiyun } else {
1394*4882a593Smuzhiyun cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
1395*4882a593Smuzhiyun cmd->v1.active_dwell = active_dwell;
1396*4882a593Smuzhiyun cmd->v1.passive_dwell = passive_dwell;
1397*4882a593Smuzhiyun cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun if (iwl_mvm_is_cdb_supported(mvm)) {
1400*4882a593Smuzhiyun hb_timing = &scan_timing[params->hb_type];
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
1403*4882a593Smuzhiyun cpu_to_le32(hb_timing->max_out_time);
1404*4882a593Smuzhiyun cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
1405*4882a593Smuzhiyun cpu_to_le32(hb_timing->suspend_time);
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun if (iwl_mvm_cdb_scan_api(mvm)) {
1409*4882a593Smuzhiyun cmd->v6.scan_priority =
1410*4882a593Smuzhiyun cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1411*4882a593Smuzhiyun cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
1412*4882a593Smuzhiyun cpu_to_le32(timing->max_out_time);
1413*4882a593Smuzhiyun cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
1414*4882a593Smuzhiyun cpu_to_le32(timing->suspend_time);
1415*4882a593Smuzhiyun } else {
1416*4882a593Smuzhiyun cmd->v1.scan_priority =
1417*4882a593Smuzhiyun cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1418*4882a593Smuzhiyun cmd->v1.max_out_time =
1419*4882a593Smuzhiyun cpu_to_le32(timing->max_out_time);
1420*4882a593Smuzhiyun cmd->v1.suspend_time =
1421*4882a593Smuzhiyun cpu_to_le32(timing->suspend_time);
1422*4882a593Smuzhiyun }
1423*4882a593Smuzhiyun }
1424*4882a593Smuzhiyun
1425*4882a593Smuzhiyun if (iwl_mvm_is_regular_scan(params))
1426*4882a593Smuzhiyun cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1427*4882a593Smuzhiyun else
1428*4882a593Smuzhiyun cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
1429*4882a593Smuzhiyun }
1430*4882a593Smuzhiyun
1431*4882a593Smuzhiyun static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
1432*4882a593Smuzhiyun {
1433*4882a593Smuzhiyun return iwl_mvm_is_regular_scan(params) ?
1434*4882a593Smuzhiyun IWL_SCAN_PRIORITY_EXT_6 :
1435*4882a593Smuzhiyun IWL_SCAN_PRIORITY_EXT_2;
1436*4882a593Smuzhiyun }
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun static void
1439*4882a593Smuzhiyun iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm,
1440*4882a593Smuzhiyun struct iwl_scan_general_params_v10 *general_params,
1441*4882a593Smuzhiyun struct iwl_mvm_scan_params *params)
1442*4882a593Smuzhiyun {
1443*4882a593Smuzhiyun struct iwl_mvm_scan_timing_params *timing, *hb_timing;
1444*4882a593Smuzhiyun u8 active_dwell, passive_dwell;
1445*4882a593Smuzhiyun
1446*4882a593Smuzhiyun timing = &scan_timing[params->type];
1447*4882a593Smuzhiyun active_dwell = IWL_SCAN_DWELL_ACTIVE;
1448*4882a593Smuzhiyun passive_dwell = IWL_SCAN_DWELL_PASSIVE;
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun general_params->adwell_default_social_chn =
1451*4882a593Smuzhiyun IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
1452*4882a593Smuzhiyun general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
1453*4882a593Smuzhiyun general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun /* if a custom max budget was configured via debugfs */
1456*4882a593Smuzhiyun if (IWL_MVM_ADWELL_MAX_BUDGET)
1457*4882a593Smuzhiyun general_params->adwell_max_budget =
1458*4882a593Smuzhiyun cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
1459*4882a593Smuzhiyun else if (params->ssids && params->ssids[0].ssid_len)
1460*4882a593Smuzhiyun general_params->adwell_max_budget =
1461*4882a593Smuzhiyun cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
1462*4882a593Smuzhiyun else
1463*4882a593Smuzhiyun general_params->adwell_max_budget =
1464*4882a593Smuzhiyun cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
1465*4882a593Smuzhiyun
1466*4882a593Smuzhiyun general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1467*4882a593Smuzhiyun general_params->max_out_of_time[SCAN_LB_LMAC_IDX] =
1468*4882a593Smuzhiyun cpu_to_le32(timing->max_out_time);
1469*4882a593Smuzhiyun general_params->suspend_time[SCAN_LB_LMAC_IDX] =
1470*4882a593Smuzhiyun cpu_to_le32(timing->suspend_time);
1471*4882a593Smuzhiyun
1472*4882a593Smuzhiyun hb_timing = &scan_timing[params->hb_type];
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun general_params->max_out_of_time[SCAN_HB_LMAC_IDX] =
1475*4882a593Smuzhiyun cpu_to_le32(hb_timing->max_out_time);
1476*4882a593Smuzhiyun general_params->suspend_time[SCAN_HB_LMAC_IDX] =
1477*4882a593Smuzhiyun cpu_to_le32(hb_timing->suspend_time);
1478*4882a593Smuzhiyun
1479*4882a593Smuzhiyun general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
1480*4882a593Smuzhiyun general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
1481*4882a593Smuzhiyun general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell;
1482*4882a593Smuzhiyun general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell;
1483*4882a593Smuzhiyun }
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun struct iwl_mvm_scan_channel_segment {
1486*4882a593Smuzhiyun u8 start_idx;
1487*4882a593Smuzhiyun u8 end_idx;
1488*4882a593Smuzhiyun u8 first_channel_id;
1489*4882a593Smuzhiyun u8 last_channel_id;
1490*4882a593Smuzhiyun u8 channel_spacing_shift;
1491*4882a593Smuzhiyun u8 band;
1492*4882a593Smuzhiyun };
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = {
1495*4882a593Smuzhiyun {
1496*4882a593Smuzhiyun .start_idx = 0,
1497*4882a593Smuzhiyun .end_idx = 13,
1498*4882a593Smuzhiyun .first_channel_id = 1,
1499*4882a593Smuzhiyun .last_channel_id = 14,
1500*4882a593Smuzhiyun .channel_spacing_shift = 0,
1501*4882a593Smuzhiyun .band = PHY_BAND_24
1502*4882a593Smuzhiyun },
1503*4882a593Smuzhiyun {
1504*4882a593Smuzhiyun .start_idx = 14,
1505*4882a593Smuzhiyun .end_idx = 41,
1506*4882a593Smuzhiyun .first_channel_id = 36,
1507*4882a593Smuzhiyun .last_channel_id = 144,
1508*4882a593Smuzhiyun .channel_spacing_shift = 2,
1509*4882a593Smuzhiyun .band = PHY_BAND_5
1510*4882a593Smuzhiyun },
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun .start_idx = 42,
1513*4882a593Smuzhiyun .end_idx = 50,
1514*4882a593Smuzhiyun .first_channel_id = 149,
1515*4882a593Smuzhiyun .last_channel_id = 181,
1516*4882a593Smuzhiyun .channel_spacing_shift = 2,
1517*4882a593Smuzhiyun .band = PHY_BAND_5
1518*4882a593Smuzhiyun },
1519*4882a593Smuzhiyun };
1520*4882a593Smuzhiyun
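/*
 * Map a channel number and PHY band to its index in the firmware channel
 * table using the segments above. Worked example: channel 44 on 5 GHz
 * falls in the second segment, so its index is 14 + ((44 - 36) >> 2) = 16;
 * channel 14 on 2.4 GHz gets 0 + ((14 - 1) >> 0) = 13.
 */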
1521*4882a593Smuzhiyun static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
1522*4882a593Smuzhiyun {
1523*4882a593Smuzhiyun int i, index;
1524*4882a593Smuzhiyun
1525*4882a593Smuzhiyun if (!channel_id)
1526*4882a593Smuzhiyun return -EINVAL;
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) {
1529*4882a593Smuzhiyun const struct iwl_mvm_scan_channel_segment *ch_segment =
1530*4882a593Smuzhiyun &scan_channel_segments[i];
1531*4882a593Smuzhiyun u32 ch_offset;
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun if (ch_segment->band != band ||
1534*4882a593Smuzhiyun ch_segment->first_channel_id > channel_id ||
1535*4882a593Smuzhiyun ch_segment->last_channel_id < channel_id)
1536*4882a593Smuzhiyun continue;
1537*4882a593Smuzhiyun
1538*4882a593Smuzhiyun ch_offset = (channel_id - ch_segment->first_channel_id) >>
1539*4882a593Smuzhiyun ch_segment->channel_spacing_shift;
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun index = scan_channel_segments[i].start_idx + ch_offset;
1542*4882a593Smuzhiyun if (index < IWL_SCAN_NUM_CHANNELS)
1543*4882a593Smuzhiyun return index;
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun break;
1546*4882a593Smuzhiyun }
1547*4882a593Smuzhiyun
1548*4882a593Smuzhiyun return -EINVAL;
1549*4882a593Smuzhiyun }
1550*4882a593Smuzhiyun
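/* 5 GHz channels on which a P2P GO is preferred to operate */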
1551*4882a593Smuzhiyun static const u8 p2p_go_friendly_chs[] = {
1552*4882a593Smuzhiyun 36, 40, 44, 48, 149, 153, 157, 161, 165,
1553*4882a593Smuzhiyun };
1554*4882a593Smuzhiyun
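/* 2.4 GHz social channels used for P2P discovery */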
1555*4882a593Smuzhiyun static const u8 social_chs[] = {
1556*4882a593Smuzhiyun 1, 6, 11
1557*4882a593Smuzhiyun };
1558*4882a593Smuzhiyun
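/*
 * For P2P Device scans, mark GO-friendly channels in the adaptive-dwell
 * override bitmap. Example: channel 149 maps to index 42, i.e. byte
 * 42 / 8 = 5, bit 42 % 8 = 2 of ch_bitmap.
 */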
1559*4882a593Smuzhiyun static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
1560*4882a593Smuzhiyun u8 ch_id, u8 band, u8 *ch_bitmap,
1561*4882a593Smuzhiyun size_t bitmap_n_entries)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun int i;
1564*4882a593Smuzhiyun
1565*4882a593Smuzhiyun if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
1566*4882a593Smuzhiyun return;
1567*4882a593Smuzhiyun
1568*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
1569*4882a593Smuzhiyun if (p2p_go_friendly_chs[i] == ch_id) {
1570*4882a593Smuzhiyun int ch_idx, bitmap_idx;
1571*4882a593Smuzhiyun
1572*4882a593Smuzhiyun ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band);
1573*4882a593Smuzhiyun if (ch_idx < 0)
1574*4882a593Smuzhiyun return;
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun bitmap_idx = ch_idx / 8;
1577*4882a593Smuzhiyun if (bitmap_idx >= bitmap_n_entries)
1578*4882a593Smuzhiyun return;
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun ch_idx = ch_idx % 8;
1581*4882a593Smuzhiyun ch_bitmap[bitmap_idx] |= BIT(ch_idx);
1582*4882a593Smuzhiyun
1583*4882a593Smuzhiyun return;
1584*4882a593Smuzhiyun }
1585*4882a593Smuzhiyun }
1586*4882a593Smuzhiyun }
1587*4882a593Smuzhiyun
1588*4882a593Smuzhiyun static u32 iwl_mvm_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
1589*4882a593Smuzhiyun {
1590*4882a593Smuzhiyun int i;
1591*4882a593Smuzhiyun u32 flags = 0;
1592*4882a593Smuzhiyun
1593*4882a593Smuzhiyun if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
1594*4882a593Smuzhiyun goto out;
1595*4882a593Smuzhiyun
1596*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
1597*4882a593Smuzhiyun if (p2p_go_friendly_chs[i] == ch_id) {
1598*4882a593Smuzhiyun flags |= IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
1599*4882a593Smuzhiyun break;
1600*4882a593Smuzhiyun }
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun if (flags)
1604*4882a593Smuzhiyun goto out;
1605*4882a593Smuzhiyun
1606*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(social_chs); i++) {
1607*4882a593Smuzhiyun if (social_chs[i] == ch_id) {
1608*4882a593Smuzhiyun flags |= IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
1609*4882a593Smuzhiyun break;
1610*4882a593Smuzhiyun }
1611*4882a593Smuzhiyun }
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun out:
1614*4882a593Smuzhiyun return flags;
1615*4882a593Smuzhiyun }
1616*4882a593Smuzhiyun
1617*4882a593Smuzhiyun static void
1618*4882a593Smuzhiyun iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1619*4882a593Smuzhiyun struct ieee80211_channel **channels,
1620*4882a593Smuzhiyun int n_channels, u32 flags,
1621*4882a593Smuzhiyun struct iwl_scan_channel_cfg_umac *channel_cfg)
1622*4882a593Smuzhiyun {
1623*4882a593Smuzhiyun int i;
1624*4882a593Smuzhiyun
1625*4882a593Smuzhiyun for (i = 0; i < n_channels; i++) {
1626*4882a593Smuzhiyun channel_cfg[i].flags = cpu_to_le32(flags);
1627*4882a593Smuzhiyun channel_cfg[i].v1.channel_num = channels[i]->hw_value;
1628*4882a593Smuzhiyun if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
1629*4882a593Smuzhiyun enum nl80211_band band = channels[i]->band;
1630*4882a593Smuzhiyun
1631*4882a593Smuzhiyun channel_cfg[i].v2.band =
1632*4882a593Smuzhiyun iwl_mvm_phy_band_from_nl80211(band);
1633*4882a593Smuzhiyun channel_cfg[i].v2.iter_count = 1;
1634*4882a593Smuzhiyun channel_cfg[i].v2.iter_interval = 0;
1635*4882a593Smuzhiyun } else {
1636*4882a593Smuzhiyun channel_cfg[i].v1.iter_count = 1;
1637*4882a593Smuzhiyun channel_cfg[i].v1.iter_interval = 0;
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun }
1641*4882a593Smuzhiyun
1642*4882a593Smuzhiyun static void
1643*4882a593Smuzhiyun iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
1644*4882a593Smuzhiyun struct ieee80211_channel **channels,
1645*4882a593Smuzhiyun struct iwl_scan_channel_params_v4 *cp,
1646*4882a593Smuzhiyun int n_channels, u32 flags,
1647*4882a593Smuzhiyun enum nl80211_iftype vif_type)
1648*4882a593Smuzhiyun {
1649*4882a593Smuzhiyun u8 *bitmap = cp->adwell_ch_override_bitmap;
1650*4882a593Smuzhiyun size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap);
1651*4882a593Smuzhiyun int i;
1652*4882a593Smuzhiyun
1653*4882a593Smuzhiyun for (i = 0; i < n_channels; i++) {
1654*4882a593Smuzhiyun enum nl80211_band band = channels[i]->band;
1655*4882a593Smuzhiyun struct iwl_scan_channel_cfg_umac *cfg =
1656*4882a593Smuzhiyun &cp->channel_config[i];
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun cfg->flags = cpu_to_le32(flags);
1659*4882a593Smuzhiyun cfg->v2.channel_num = channels[i]->hw_value;
1660*4882a593Smuzhiyun cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
1661*4882a593Smuzhiyun cfg->v2.iter_count = 1;
1662*4882a593Smuzhiyun cfg->v2.iter_interval = 0;
1663*4882a593Smuzhiyun
1664*4882a593Smuzhiyun iwl_mvm_scan_ch_add_n_aps_override(vif_type,
1665*4882a593Smuzhiyun cfg->v2.channel_num,
1666*4882a593Smuzhiyun cfg->v2.band, bitmap,
1667*4882a593Smuzhiyun bitmap_n_entries);
1668*4882a593Smuzhiyun }
1669*4882a593Smuzhiyun }
1670*4882a593Smuzhiyun
1671*4882a593Smuzhiyun static void
1672*4882a593Smuzhiyun iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
1673*4882a593Smuzhiyun struct ieee80211_channel **channels,
1674*4882a593Smuzhiyun struct iwl_scan_channel_params_v6 *cp,
1675*4882a593Smuzhiyun int n_channels, u32 flags,
1676*4882a593Smuzhiyun enum nl80211_iftype vif_type)
1677*4882a593Smuzhiyun {
1678*4882a593Smuzhiyun int i;
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun for (i = 0; i < n_channels; i++) {
1681*4882a593Smuzhiyun enum nl80211_band band = channels[i]->band;
1682*4882a593Smuzhiyun struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
1683*4882a593Smuzhiyun u32 n_aps_flag =
1684*4882a593Smuzhiyun iwl_mvm_scan_ch_n_aps_flag(vif_type,
1685*4882a593Smuzhiyun channels[i]->hw_value);
1686*4882a593Smuzhiyun
1687*4882a593Smuzhiyun cfg->flags = cpu_to_le32(flags | n_aps_flag);
1688*4882a593Smuzhiyun cfg->v2.channel_num = channels[i]->hw_value;
1689*4882a593Smuzhiyun cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
1690*4882a593Smuzhiyun cfg->v2.iter_count = 1;
1691*4882a593Smuzhiyun cfg->v2.iter_interval = 0;
1692*4882a593Smuzhiyun }
1693*4882a593Smuzhiyun }
1694*4882a593Smuzhiyun
1695*4882a593Smuzhiyun static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
1696*4882a593Smuzhiyun struct iwl_mvm_scan_params *params,
1697*4882a593Smuzhiyun struct ieee80211_vif *vif)
1698*4882a593Smuzhiyun {
1699*4882a593Smuzhiyun u8 flags = 0;
1700*4882a593Smuzhiyun
1701*4882a593Smuzhiyun flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
1702*4882a593Smuzhiyun
1703*4882a593Smuzhiyun if (iwl_mvm_scan_use_ebs(mvm, vif))
1704*4882a593Smuzhiyun flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
1705*4882a593Smuzhiyun IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1706*4882a593Smuzhiyun IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1707*4882a593Smuzhiyun
1708*4882a593Smuzhiyun /* set fragmented ebs for fragmented scan on HB channels */
1709*4882a593Smuzhiyun if ((!iwl_mvm_is_cdb_supported(mvm) &&
1710*4882a593Smuzhiyun iwl_mvm_is_scan_fragmented(params->type)) ||
1711*4882a593Smuzhiyun (iwl_mvm_is_cdb_supported(mvm) &&
1712*4882a593Smuzhiyun iwl_mvm_is_scan_fragmented(params->hb_type)))
1713*4882a593Smuzhiyun flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
1714*4882a593Smuzhiyun
1715*4882a593Smuzhiyun return flags;
1716*4882a593Smuzhiyun }
1717*4882a593Smuzhiyun
1718*4882a593Smuzhiyun static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
1719*4882a593Smuzhiyun struct iwl_mvm_scan_params *params,
1720*4882a593Smuzhiyun struct ieee80211_vif *vif,
1721*4882a593Smuzhiyun int type)
1722*4882a593Smuzhiyun {
1723*4882a593Smuzhiyun u16 flags = 0;
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun if (params->n_ssids == 0)
1726*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
1727*4882a593Smuzhiyun
1728*4882a593Smuzhiyun if (iwl_mvm_is_scan_fragmented(params->type))
1729*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
1730*4882a593Smuzhiyun
1731*4882a593Smuzhiyun if (iwl_mvm_is_scan_fragmented(params->hb_type))
1732*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2;
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun if (params->pass_all)
1735*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
1736*4882a593Smuzhiyun else
1737*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
1738*4882a593Smuzhiyun
1739*4882a593Smuzhiyun if (!iwl_mvm_is_regular_scan(params))
1740*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
1741*4882a593Smuzhiyun
1742*4882a593Smuzhiyun if (params->iter_notif ||
1743*4882a593Smuzhiyun mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
1744*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
1745*4882a593Smuzhiyun
1746*4882a593Smuzhiyun if (IWL_MVM_ADWELL_ENABLE)
1747*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
1750*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
1751*4882a593Smuzhiyun
1752*4882a593Smuzhiyun return flags;
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
1756*4882a593Smuzhiyun struct iwl_mvm_scan_params *params,
1757*4882a593Smuzhiyun struct ieee80211_vif *vif)
1758*4882a593Smuzhiyun {
1759*4882a593Smuzhiyun u16 flags = 0;
1760*4882a593Smuzhiyun
1761*4882a593Smuzhiyun if (params->n_ssids == 0)
1762*4882a593Smuzhiyun flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
1763*4882a593Smuzhiyun
1764*4882a593Smuzhiyun if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
1765*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun if (iwl_mvm_is_scan_fragmented(params->type))
1768*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun if (iwl_mvm_is_cdb_supported(mvm) &&
1771*4882a593Smuzhiyun iwl_mvm_is_scan_fragmented(params->hb_type))
1772*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
1773*4882a593Smuzhiyun
1774*4882a593Smuzhiyun if (iwl_mvm_rrm_scan_needed(mvm) &&
1775*4882a593Smuzhiyun fw_has_capa(&mvm->fw->ucode_capa,
1776*4882a593Smuzhiyun IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
1777*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
1778*4882a593Smuzhiyun
1779*4882a593Smuzhiyun if (params->pass_all)
1780*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
1781*4882a593Smuzhiyun else
1782*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
1783*4882a593Smuzhiyun
1784*4882a593Smuzhiyun if (!iwl_mvm_is_regular_scan(params))
1785*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
1786*4882a593Smuzhiyun
1787*4882a593Smuzhiyun if (params->iter_notif)
1788*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1789*4882a593Smuzhiyun
1790*4882a593Smuzhiyun #ifdef CONFIG_IWLWIFI_DEBUGFS
1791*4882a593Smuzhiyun if (mvm->scan_iter_notif_enabled)
1792*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1793*4882a593Smuzhiyun #endif
1794*4882a593Smuzhiyun
1795*4882a593Smuzhiyun if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
1796*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1797*4882a593Smuzhiyun
1798*4882a593Smuzhiyun if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE)
1799*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
1800*4882a593Smuzhiyun
1801*4882a593Smuzhiyun /*
1802*4882a593Smuzhiyun * Extended dwell is relevant only for the low band to start with, as it
1803*4882a593Smuzhiyun * is used only on the social channels (1, 6, 11), so even for CDB it is
1804*4882a593Smuzhiyun * enough to check the scan type of the low band.
1805*4882a593Smuzhiyun */
1806*4882a593Smuzhiyun if (iwl_mvm_is_regular_scan(params) &&
1807*4882a593Smuzhiyun vif->type != NL80211_IFTYPE_P2P_DEVICE &&
1808*4882a593Smuzhiyun !iwl_mvm_is_scan_fragmented(params->type) &&
1809*4882a593Smuzhiyun !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
1810*4882a593Smuzhiyun !iwl_mvm_is_oce_supported(mvm))
1811*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
1812*4882a593Smuzhiyun
1813*4882a593Smuzhiyun if (iwl_mvm_is_oce_supported(mvm)) {
1814*4882a593Smuzhiyun if ((params->flags &
1815*4882a593Smuzhiyun NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE))
1816*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE;
1817*4882a593Smuzhiyun /* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and
1818*4882a593Smuzhiyun * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share
1819*4882a593Smuzhiyun * the same bit, we need to make sure that we use this bit here
1820*4882a593Smuzhiyun * only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be
1821*4882a593Smuzhiyun * used. */
1822*4882a593Smuzhiyun if ((params->flags &
1823*4882a593Smuzhiyun NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
1824*4882a593Smuzhiyun !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm)))
1825*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP;
1826*4882a593Smuzhiyun if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
1827*4882a593Smuzhiyun flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME;
1828*4882a593Smuzhiyun }
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun return flags;
1831*4882a593Smuzhiyun }
1832*4882a593Smuzhiyun
1833*4882a593Smuzhiyun static int
1834*4882a593Smuzhiyun iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params,
1835*4882a593Smuzhiyun struct iwl_scan_umac_schedule *schedule,
1836*4882a593Smuzhiyun __le16 *delay)
1837*4882a593Smuzhiyun {
1838*4882a593Smuzhiyun int i;

1839*4882a593Smuzhiyun if (WARN_ON(!params->n_scan_plans ||
1840*4882a593Smuzhiyun params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
1841*4882a593Smuzhiyun return -EINVAL;
1842*4882a593Smuzhiyun
1843*4882a593Smuzhiyun for (i = 0; i < params->n_scan_plans; i++) {
1844*4882a593Smuzhiyun struct cfg80211_sched_scan_plan *scan_plan =
1845*4882a593Smuzhiyun ¶ms->scan_plans[i];
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun schedule[i].iter_count = scan_plan->iterations;
1848*4882a593Smuzhiyun schedule[i].interval =
1849*4882a593Smuzhiyun cpu_to_le16(scan_plan->interval);
1850*4882a593Smuzhiyun }
1851*4882a593Smuzhiyun
1852*4882a593Smuzhiyun /*
1853*4882a593Smuzhiyun * If the number of iterations of the last scan plan is set to
1854*4882a593Smuzhiyun * zero, it should run infinitely. However, this is not always the case.
1855*4882a593Smuzhiyun * For example, when regular scan is requested the driver sets one scan
1856*4882a593Smuzhiyun * plan with one iteration.
1857*4882a593Smuzhiyun */
1858*4882a593Smuzhiyun if (!schedule[params->n_scan_plans - 1].iter_count)
1859*4882a593Smuzhiyun schedule[params->n_scan_plans - 1].iter_count = 0xff;
1860*4882a593Smuzhiyun
1861*4882a593Smuzhiyun *delay = cpu_to_le16(params->delay);
1862*4882a593Smuzhiyun
1863*4882a593Smuzhiyun return 0;
1864*4882a593Smuzhiyun }
1865*4882a593Smuzhiyun
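/*
 * Build a pre-v12 UMAC scan request. The command buffer is laid out as a
 * fixed header, followed by n_scan_channels channel configs, followed by
 * a tail whose layout (v1 or v2) depends on extended channel support.
 */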
1866*4882a593Smuzhiyun static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1867*4882a593Smuzhiyun struct iwl_mvm_scan_params *params,
1868*4882a593Smuzhiyun int type, int uid)
1869*4882a593Smuzhiyun {
1870*4882a593Smuzhiyun struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
1871*4882a593Smuzhiyun struct iwl_scan_umac_chan_param *chan_param;
1872*4882a593Smuzhiyun void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
1873*4882a593Smuzhiyun void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
1874*4882a593Smuzhiyun mvm->fw->ucode_capa.n_scan_channels;
1875*4882a593Smuzhiyun struct iwl_scan_req_umac_tail_v2 *tail_v2 =
1876*4882a593Smuzhiyun (struct iwl_scan_req_umac_tail_v2 *)sec_part;
1877*4882a593Smuzhiyun struct iwl_scan_req_umac_tail_v1 *tail_v1;
1878*4882a593Smuzhiyun struct iwl_ssid_ie *direct_scan;
1879*4882a593Smuzhiyun int ret = 0;
1880*4882a593Smuzhiyun u32 ssid_bitmap = 0;
1881*4882a593Smuzhiyun u8 channel_flags = 0;
1882*4882a593Smuzhiyun u16 gen_flags;
1883*4882a593Smuzhiyun struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
1884*4882a593Smuzhiyun
1885*4882a593Smuzhiyun chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
1886*4882a593Smuzhiyun
1887*4882a593Smuzhiyun iwl_mvm_scan_umac_dwell(mvm, cmd, params);
1888*4882a593Smuzhiyun
1889*4882a593Smuzhiyun mvm->scan_uid_status[uid] = type;
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun cmd->uid = cpu_to_le32(uid);
1892*4882a593Smuzhiyun gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
1893*4882a593Smuzhiyun cmd->general_flags = cpu_to_le16(gen_flags);
1894*4882a593Smuzhiyun if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
1895*4882a593Smuzhiyun if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)
1896*4882a593Smuzhiyun cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] =
1897*4882a593Smuzhiyun IWL_SCAN_NUM_OF_FRAGS;
1898*4882a593Smuzhiyun if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
1899*4882a593Smuzhiyun cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
1900*4882a593Smuzhiyun IWL_SCAN_NUM_OF_FRAGS;
1901*4882a593Smuzhiyun
1902*4882a593Smuzhiyun cmd->v8.general_flags2 =
1903*4882a593Smuzhiyun IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
1904*4882a593Smuzhiyun }
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun cmd->scan_start_mac_id = scan_vif->id;
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
1909*4882a593Smuzhiyun cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
1910*4882a593Smuzhiyun
1911*4882a593Smuzhiyun if (iwl_mvm_scan_use_ebs(mvm, vif)) {
1912*4882a593Smuzhiyun channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1913*4882a593Smuzhiyun IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1914*4882a593Smuzhiyun IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1915*4882a593Smuzhiyun
1916*4882a593Smuzhiyun /* set fragmented ebs for fragmented scan on HB channels */
1917*4882a593Smuzhiyun if (iwl_mvm_is_frag_ebs_supported(mvm)) {
1918*4882a593Smuzhiyun if (gen_flags &
1919*4882a593Smuzhiyun IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED ||
1920*4882a593Smuzhiyun (!iwl_mvm_is_cdb_supported(mvm) &&
1921*4882a593Smuzhiyun gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED))
1922*4882a593Smuzhiyun channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
1923*4882a593Smuzhiyun }
1924*4882a593Smuzhiyun }
1925*4882a593Smuzhiyun
1926*4882a593Smuzhiyun chan_param->flags = channel_flags;
1927*4882a593Smuzhiyun chan_param->count = params->n_channels;
1928*4882a593Smuzhiyun
1929*4882a593Smuzhiyun ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
1930*4882a593Smuzhiyun &tail_v2->delay);
1931*4882a593Smuzhiyun if (ret)
1932*4882a593Smuzhiyun return ret;
1933*4882a593Smuzhiyun
1934*4882a593Smuzhiyun if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
1935*4882a593Smuzhiyun tail_v2->preq = params->preq;
1936*4882a593Smuzhiyun direct_scan = tail_v2->direct_scan;
1937*4882a593Smuzhiyun } else {
1938*4882a593Smuzhiyun tail_v1 = (struct iwl_scan_req_umac_tail_v1 *)sec_part;
1939*4882a593Smuzhiyun iwl_mvm_scan_set_legacy_probe_req(&tail_v1->preq,
1940*4882a593Smuzhiyun ¶ms->preq);
1941*4882a593Smuzhiyun direct_scan = tail_v1->direct_scan;
1942*4882a593Smuzhiyun }
1943*4882a593Smuzhiyun iwl_scan_build_ssids(params, direct_scan, &ssid_bitmap);
1944*4882a593Smuzhiyun iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
1945*4882a593Smuzhiyun params->n_channels, ssid_bitmap,
1946*4882a593Smuzhiyun cmd_data);
1947*4882a593Smuzhiyun return 0;
1948*4882a593Smuzhiyun }
1949*4882a593Smuzhiyun
1950*4882a593Smuzhiyun static void
1951*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_general_p_v10(struct iwl_mvm *mvm,
1952*4882a593Smuzhiyun struct iwl_mvm_scan_params *params,
1953*4882a593Smuzhiyun struct ieee80211_vif *vif,
1954*4882a593Smuzhiyun struct iwl_scan_general_params_v10 *gp,
1955*4882a593Smuzhiyun u16 gen_flags)
1956*4882a593Smuzhiyun {
1957*4882a593Smuzhiyun struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun iwl_mvm_scan_umac_dwell_v10(mvm, gp, params);
1960*4882a593Smuzhiyun
1961*4882a593Smuzhiyun gp->flags = cpu_to_le16(gen_flags);
1962*4882a593Smuzhiyun
1963*4882a593Smuzhiyun if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
1964*4882a593Smuzhiyun gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
1965*4882a593Smuzhiyun if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
1966*4882a593Smuzhiyun gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
1967*4882a593Smuzhiyun
1968*4882a593Smuzhiyun gp->scan_start_mac_id = scan_vif->id;
1969*4882a593Smuzhiyun }
1970*4882a593Smuzhiyun
1971*4882a593Smuzhiyun static void
1972*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params,
1973*4882a593Smuzhiyun struct iwl_scan_probe_params_v3 *pp)
1974*4882a593Smuzhiyun {
1975*4882a593Smuzhiyun pp->preq = params->preq;
1976*4882a593Smuzhiyun pp->ssid_num = params->n_ssids;
1977*4882a593Smuzhiyun iwl_scan_build_ssids(params, pp->direct_scan, NULL);
1978*4882a593Smuzhiyun }
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun static void
1981*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
1982*4882a593Smuzhiyun struct iwl_scan_probe_params_v4 *pp,
1983*4882a593Smuzhiyun u32 *bitmap_ssid)
1984*4882a593Smuzhiyun {
1985*4882a593Smuzhiyun pp->preq = params->preq;
1986*4882a593Smuzhiyun iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid);
1987*4882a593Smuzhiyun }
1988*4882a593Smuzhiyun
1989*4882a593Smuzhiyun static void
1990*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
1991*4882a593Smuzhiyun struct iwl_mvm_scan_params *params,
1992*4882a593Smuzhiyun struct ieee80211_vif *vif,
1993*4882a593Smuzhiyun struct iwl_scan_channel_params_v4 *cp,
1994*4882a593Smuzhiyun u32 channel_cfg_flags)
1995*4882a593Smuzhiyun {
1996*4882a593Smuzhiyun cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
1997*4882a593Smuzhiyun cp->count = params->n_channels;
1998*4882a593Smuzhiyun cp->num_of_aps_override = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
1999*4882a593Smuzhiyun
2000*4882a593Smuzhiyun iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
2001*4882a593Smuzhiyun params->n_channels,
2002*4882a593Smuzhiyun channel_cfg_flags,
2003*4882a593Smuzhiyun vif->type);
2004*4882a593Smuzhiyun }
2005*4882a593Smuzhiyun
2006*4882a593Smuzhiyun static void
2007*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_ch_p_v6(struct iwl_mvm *mvm,
2008*4882a593Smuzhiyun struct iwl_mvm_scan_params *params,
2009*4882a593Smuzhiyun struct ieee80211_vif *vif,
2010*4882a593Smuzhiyun struct iwl_scan_channel_params_v6 *cp,
2011*4882a593Smuzhiyun u32 channel_cfg_flags)
2012*4882a593Smuzhiyun {
2013*4882a593Smuzhiyun cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2014*4882a593Smuzhiyun cp->count = params->n_channels;
2015*4882a593Smuzhiyun cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2016*4882a593Smuzhiyun cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
2017*4882a593Smuzhiyun
2018*4882a593Smuzhiyun iwl_mvm_umac_scan_cfg_channels_v6(mvm, params->channels, cp,
2019*4882a593Smuzhiyun params->n_channels,
2020*4882a593Smuzhiyun channel_cfg_flags,
2021*4882a593Smuzhiyun vif->type);
2022*4882a593Smuzhiyun }
2023*4882a593Smuzhiyun
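/*
 * v12 and v14 of the UMAC scan request share the same general and
 * periodic parameter filling; they differ in the probe params (v3 vs v4
 * with an SSID bitmap) and the channel params (v4 vs v6) used below.
 */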
2024*4882a593Smuzhiyun static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2025*4882a593Smuzhiyun struct iwl_mvm_scan_params *params, int type,
2026*4882a593Smuzhiyun int uid)
2027*4882a593Smuzhiyun {
2028*4882a593Smuzhiyun struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd;
2029*4882a593Smuzhiyun struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params;
2030*4882a593Smuzhiyun int ret;
2031*4882a593Smuzhiyun u16 gen_flags;
2032*4882a593Smuzhiyun
2033*4882a593Smuzhiyun mvm->scan_uid_status[uid] = type;
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
2036*4882a593Smuzhiyun cmd->uid = cpu_to_le32(uid);
2037*4882a593Smuzhiyun
2038*4882a593Smuzhiyun gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
2039*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
2040*4882a593Smuzhiyun &scan_p->general_params,
2041*4882a593Smuzhiyun gen_flags);
2042*4882a593Smuzhiyun
2043*4882a593Smuzhiyun ret = iwl_mvm_fill_scan_sched_params(params,
2044*4882a593Smuzhiyun scan_p->periodic_params.schedule,
2045*4882a593Smuzhiyun &scan_p->periodic_params.delay);
2046*4882a593Smuzhiyun if (ret)
2047*4882a593Smuzhiyun return ret;
2048*4882a593Smuzhiyun
2049*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
2050*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
2051*4882a593Smuzhiyun &scan_p->channel_params, 0);
2052*4882a593Smuzhiyun
2053*4882a593Smuzhiyun return 0;
2054*4882a593Smuzhiyun }
2055*4882a593Smuzhiyun
2056*4882a593Smuzhiyun static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2057*4882a593Smuzhiyun struct iwl_mvm_scan_params *params, int type,
2058*4882a593Smuzhiyun int uid)
2059*4882a593Smuzhiyun {
2060*4882a593Smuzhiyun struct iwl_scan_req_umac_v14 *cmd = mvm->scan_cmd;
2061*4882a593Smuzhiyun struct iwl_scan_req_params_v14 *scan_p = &cmd->scan_params;
2062*4882a593Smuzhiyun int ret;
2063*4882a593Smuzhiyun u16 gen_flags;
2064*4882a593Smuzhiyun u32 bitmap_ssid = 0;
2065*4882a593Smuzhiyun
2066*4882a593Smuzhiyun mvm->scan_uid_status[uid] = type;
2067*4882a593Smuzhiyun
2068*4882a593Smuzhiyun cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
2069*4882a593Smuzhiyun cmd->uid = cpu_to_le32(uid);
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
2072*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif,
2073*4882a593Smuzhiyun &scan_p->general_params,
2074*4882a593Smuzhiyun gen_flags);
2075*4882a593Smuzhiyun
2076*4882a593Smuzhiyun ret = iwl_mvm_fill_scan_sched_params(params,
2077*4882a593Smuzhiyun scan_p->periodic_params.schedule,
2078*4882a593Smuzhiyun &scan_p->periodic_params.delay);
2079*4882a593Smuzhiyun if (ret)
2080*4882a593Smuzhiyun return ret;
2081*4882a593Smuzhiyun
2082*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
2083*4882a593Smuzhiyun &bitmap_ssid);
2084*4882a593Smuzhiyun iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif,
2085*4882a593Smuzhiyun &scan_p->channel_params, bitmap_ssid);
2086*4882a593Smuzhiyun
2087*4882a593Smuzhiyun return 0;
2088*4882a593Smuzhiyun }
2089*4882a593Smuzhiyun
2090*4882a593Smuzhiyun static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
2091*4882a593Smuzhiyun {
2092*4882a593Smuzhiyun return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
2093*4882a593Smuzhiyun }
2094*4882a593Smuzhiyun
2095*4882a593Smuzhiyun static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
2096*4882a593Smuzhiyun {
2097*4882a593Smuzhiyun bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2098*4882a593Smuzhiyun IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2099*4882a593Smuzhiyun
2100*4882a593Smuzhiyun /* This looks a bit arbitrary, but the idea is that if we run
2101*4882a593Smuzhiyun * out of possible simultaneous scans and the userspace is
2102*4882a593Smuzhiyun * trying to run a scan type that is already running, we
2103*4882a593Smuzhiyun * return -EBUSY. But if the userspace wants to start a
2104*4882a593Smuzhiyun * different type of scan, we stop the opposite type to make
2105*4882a593Smuzhiyun * space for the new request. The reason is backwards
2106*4882a593Smuzhiyun * compatibility with old wpa_supplicant that wouldn't stop a
2107*4882a593Smuzhiyun * scheduled scan before starting a normal scan.
2108*4882a593Smuzhiyun */
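/*
 * Example: with max_scans exhausted by a scheduled scan, a new regular
 * scan request stops the scheduled scan below instead of failing, while
 * a second scheduled scan request returns -EBUSY.
 */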
2109*4882a593Smuzhiyun
2110*4882a593Smuzhiyun /* FW supports only a single periodic scan */
2111*4882a593Smuzhiyun if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
2112*4882a593Smuzhiyun mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT))
2113*4882a593Smuzhiyun return -EBUSY;
2114*4882a593Smuzhiyun
2115*4882a593Smuzhiyun if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
2116*4882a593Smuzhiyun return 0;
2117*4882a593Smuzhiyun
2118*4882a593Smuzhiyun /* Use a switch, even though this is a bitmask, so that more
2119*4882a593Smuzhiyun * than one bit set will fall into default and we will warn.
2120*4882a593Smuzhiyun */
2121*4882a593Smuzhiyun switch (type) {
2122*4882a593Smuzhiyun case IWL_MVM_SCAN_REGULAR:
2123*4882a593Smuzhiyun if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
2124*4882a593Smuzhiyun return -EBUSY;
2125*4882a593Smuzhiyun return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2126*4882a593Smuzhiyun case IWL_MVM_SCAN_SCHED:
2127*4882a593Smuzhiyun if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
2128*4882a593Smuzhiyun return -EBUSY;
2129*4882a593Smuzhiyun return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2130*4882a593Smuzhiyun case IWL_MVM_SCAN_NETDETECT:
2131*4882a593Smuzhiyun /* For non-unified images, there's no need to stop
2132*4882a593Smuzhiyun * anything for net-detect since the firmware is
2133*4882a593Smuzhiyun * restarted anyway. This way, any sched scans that
2134*4882a593Smuzhiyun * were running will be restarted when we resume.
2135*4882a593Smuzhiyun */
2136*4882a593Smuzhiyun if (!unified_image)
2137*4882a593Smuzhiyun return 0;
2138*4882a593Smuzhiyun
2139*4882a593Smuzhiyun /* If this is a unified image and we ran out of scans,
2140*4882a593Smuzhiyun * we need to stop something. Prefer stopping regular
2141*4882a593Smuzhiyun * scans, because the results are useless at this
2142*4882a593Smuzhiyun * point, and we should be able to keep running
2143*4882a593Smuzhiyun * another scheduled scan while suspended.
2144*4882a593Smuzhiyun */
2145*4882a593Smuzhiyun if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
2146*4882a593Smuzhiyun return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
2147*4882a593Smuzhiyun true);
2148*4882a593Smuzhiyun if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
2149*4882a593Smuzhiyun return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
2150*4882a593Smuzhiyun true);
2151*4882a593Smuzhiyun /* Something is wrong if no scan was running but we
2152*4882a593Smuzhiyun * ran out of scans.
2153*4882a593Smuzhiyun */
2154*4882a593Smuzhiyun /* fall through */
2155*4882a593Smuzhiyun default:
2156*4882a593Smuzhiyun WARN_ON(1);
2157*4882a593Smuzhiyun break;
2158*4882a593Smuzhiyun }
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun return -EIO;
2161*4882a593Smuzhiyun }
2162*4882a593Smuzhiyun
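/* Regular scan watchdog timeout, in milliseconds (it is passed through
 * msecs_to_jiffies() below), i.e. 30 seconds.
 */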
2163*4882a593Smuzhiyun #define SCAN_TIMEOUT 30000
2164*4882a593Smuzhiyun
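/* Delayed work armed when a regular scan is started; if it ever fires,
 * the firmware did not send a scan-complete notification in time, so
 * force an NMI, which should trigger the normal firmware error-recovery
 * (restart) flow.
 */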
2165*4882a593Smuzhiyun void iwl_mvm_scan_timeout_wk(struct work_struct *work)
2166*4882a593Smuzhiyun {
2167*4882a593Smuzhiyun struct delayed_work *delayed_work = to_delayed_work(work);
2168*4882a593Smuzhiyun struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
2169*4882a593Smuzhiyun scan_timeout_dwork);
2170*4882a593Smuzhiyun
2171*4882a593Smuzhiyun IWL_ERR(mvm, "regular scan timed out\n");
2172*4882a593Smuzhiyun
2173*4882a593Smuzhiyun iwl_force_nmi(mvm->trans);
2174*4882a593Smuzhiyun }
2175*4882a593Smuzhiyun
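/* Pick the scan type(s) for this request: on CDB (dual-band) firmware a
 * separate type is chosen per band (hb_type presumably covering the
 * high/5 GHz band), otherwise a single type is used for both bands.
 */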
2176*4882a593Smuzhiyun static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
2177*4882a593Smuzhiyun struct iwl_mvm_scan_params *params,
2178*4882a593Smuzhiyun struct ieee80211_vif *vif)
2179*4882a593Smuzhiyun {
2180*4882a593Smuzhiyun if (iwl_mvm_is_cdb_supported(mvm)) {
2181*4882a593Smuzhiyun params->type =
2182*4882a593Smuzhiyun iwl_mvm_get_scan_type_band(mvm, vif,
2183*4882a593Smuzhiyun NL80211_BAND_2GHZ);
2184*4882a593Smuzhiyun params->hb_type =
2185*4882a593Smuzhiyun iwl_mvm_get_scan_type_band(mvm, vif,
2186*4882a593Smuzhiyun NL80211_BAND_5GHZ);
2187*4882a593Smuzhiyun } else {
2188*4882a593Smuzhiyun params->type = iwl_mvm_get_scan_type(mvm, vif);
2189*4882a593Smuzhiyun }
2190*4882a593Smuzhiyun }
2191*4882a593Smuzhiyun
2192*4882a593Smuzhiyun struct iwl_scan_umac_handler {
2193*4882a593Smuzhiyun u8 version;
2194*4882a593Smuzhiyun int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2195*4882a593Smuzhiyun struct iwl_mvm_scan_params *params, int type, int uid);
2196*4882a593Smuzhiyun };
2197*4882a593Smuzhiyun
2198*4882a593Smuzhiyun #define IWL_SCAN_UMAC_HANDLER(_ver) { \
2199*4882a593Smuzhiyun .version = _ver, \
2200*4882a593Smuzhiyun .handler = iwl_mvm_scan_umac_v##_ver, \
2201*4882a593Smuzhiyun }
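/* For example, IWL_SCAN_UMAC_HANDLER(14) expands to
 * { .version = 14, .handler = iwl_mvm_scan_umac_v14, }.
 */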
2202*4882a593Smuzhiyun
2203*4882a593Smuzhiyun static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
2204*4882a593Smuzhiyun /* set the newest version first to shorten the list traverse time */
2205*4882a593Smuzhiyun IWL_SCAN_UMAC_HANDLER(14),
2206*4882a593Smuzhiyun IWL_SCAN_UMAC_HANDLER(12),
2207*4882a593Smuzhiyun };
2208*4882a593Smuzhiyun
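/* Build the scan request for the firmware: use the LMAC format when the
 * firmware lacks UMAC scan support; otherwise grab a free UID, look up
 * the SCAN_REQ_UMAC version the firmware advertises and dispatch to the
 * matching versioned handler, falling back to iwl_mvm_scan_umac() for
 * older versions.
 */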
2209*4882a593Smuzhiyun static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
2210*4882a593Smuzhiyun struct ieee80211_vif *vif,
2211*4882a593Smuzhiyun struct iwl_host_cmd *hcmd,
2212*4882a593Smuzhiyun struct iwl_mvm_scan_params *params,
2213*4882a593Smuzhiyun int type)
2214*4882a593Smuzhiyun {
2215*4882a593Smuzhiyun int uid, i;
2216*4882a593Smuzhiyun u8 scan_ver;
2217*4882a593Smuzhiyun
2218*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2219*4882a593Smuzhiyun memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd));
2220*4882a593Smuzhiyun
2221*4882a593Smuzhiyun if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2222*4882a593Smuzhiyun hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun return iwl_mvm_scan_lmac(mvm, vif, params);
2225*4882a593Smuzhiyun }
2226*4882a593Smuzhiyun
2227*4882a593Smuzhiyun uid = iwl_mvm_scan_uid_by_status(mvm, 0);
2228*4882a593Smuzhiyun if (uid < 0)
2229*4882a593Smuzhiyun return uid;
2230*4882a593Smuzhiyun
2231*4882a593Smuzhiyun hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
2232*4882a593Smuzhiyun
2233*4882a593Smuzhiyun scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
2234*4882a593Smuzhiyun SCAN_REQ_UMAC,
2235*4882a593Smuzhiyun IWL_FW_CMD_VER_UNKNOWN);
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
2238*4882a593Smuzhiyun const struct iwl_scan_umac_handler *ver_handler =
2239*4882a593Smuzhiyun &iwl_scan_umac_handlers[i];
2240*4882a593Smuzhiyun
2241*4882a593Smuzhiyun if (ver_handler->version != scan_ver)
2242*4882a593Smuzhiyun continue;
2243*4882a593Smuzhiyun
2244*4882a593Smuzhiyun return ver_handler->handler(mvm, vif, params, type, uid);
2245*4882a593Smuzhiyun }
2246*4882a593Smuzhiyun
2247*4882a593Smuzhiyun return iwl_mvm_scan_umac(mvm, vif, params, type, uid);
2248*4882a593Smuzhiyun }
2249*4882a593Smuzhiyun
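/* Start a regular (one-shot) scan requested by mac80211: validate the
 * request, translate it into iwl_mvm_scan_params, send the command and
 * arm the SCAN_TIMEOUT watchdog.
 */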
2250*4882a593Smuzhiyun int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2251*4882a593Smuzhiyun struct cfg80211_scan_request *req,
2252*4882a593Smuzhiyun struct ieee80211_scan_ies *ies)
2253*4882a593Smuzhiyun {
2254*4882a593Smuzhiyun struct iwl_host_cmd hcmd = {
2255*4882a593Smuzhiyun .len = { iwl_mvm_scan_size(mvm), },
2256*4882a593Smuzhiyun .data = { mvm->scan_cmd, },
2257*4882a593Smuzhiyun .dataflags = { IWL_HCMD_DFL_NOCOPY, },
2258*4882a593Smuzhiyun };
2259*4882a593Smuzhiyun struct iwl_mvm_scan_params params = {};
2260*4882a593Smuzhiyun int ret;
2261*4882a593Smuzhiyun struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
2262*4882a593Smuzhiyun
2263*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2264*4882a593Smuzhiyun
2265*4882a593Smuzhiyun if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
2266*4882a593Smuzhiyun IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
2267*4882a593Smuzhiyun return -EBUSY;
2268*4882a593Smuzhiyun }
2269*4882a593Smuzhiyun
2270*4882a593Smuzhiyun ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
2271*4882a593Smuzhiyun if (ret)
2272*4882a593Smuzhiyun return ret;
2273*4882a593Smuzhiyun
2274*4882a593Smuzhiyun /* we should have failed registration if scan_cmd was NULL */
2275*4882a593Smuzhiyun if (WARN_ON(!mvm->scan_cmd))
2276*4882a593Smuzhiyun return -ENOMEM;
2277*4882a593Smuzhiyun
2278*4882a593Smuzhiyun if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
2279*4882a593Smuzhiyun return -ENOBUFS;
2280*4882a593Smuzhiyun
2281*4882a593Smuzhiyun params.n_ssids = req->n_ssids;
2282*4882a593Smuzhiyun params.flags = req->flags;
2283*4882a593Smuzhiyun params.n_channels = req->n_channels;
2284*4882a593Smuzhiyun params.delay = 0;
2285*4882a593Smuzhiyun params.ssids = req->ssids;
2286*4882a593Smuzhiyun params.channels = req->channels;
2287*4882a593Smuzhiyun params.mac_addr = req->mac_addr;
2288*4882a593Smuzhiyun params.mac_addr_mask = req->mac_addr_mask;
2289*4882a593Smuzhiyun params.no_cck = req->no_cck;
2290*4882a593Smuzhiyun params.pass_all = true;
2291*4882a593Smuzhiyun params.n_match_sets = 0;
2292*4882a593Smuzhiyun params.match_sets = NULL;
2293*4882a593Smuzhiyun
2294*4882a593Smuzhiyun params.scan_plans = &scan_plan;
2295*4882a593Smuzhiyun params.n_scan_plans = 1;
2296*4882a593Smuzhiyun
2297*4882a593Smuzhiyun 	iwl_mvm_fill_scan_type(mvm, &params, vif);
2298*4882a593Smuzhiyun
2299*4882a593Smuzhiyun if (req->duration)
2300*4882a593Smuzhiyun params.iter_notif = true;
2301*4882a593Smuzhiyun
2302*4882a593Smuzhiyun 	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
2303*4882a593Smuzhiyun 
2304*4882a593Smuzhiyun 	ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
2305*4882a593Smuzhiyun IWL_MVM_SCAN_REGULAR);
2306*4882a593Smuzhiyun
2307*4882a593Smuzhiyun if (ret)
2308*4882a593Smuzhiyun return ret;
2309*4882a593Smuzhiyun
2310*4882a593Smuzhiyun iwl_mvm_pause_tcm(mvm, false);
2311*4882a593Smuzhiyun
2312*4882a593Smuzhiyun ret = iwl_mvm_send_cmd(mvm, &hcmd);
2313*4882a593Smuzhiyun if (ret) {
2314*4882a593Smuzhiyun /* If the scan failed, it usually means that the FW was unable
2315*4882a593Smuzhiyun * to allocate the time events. Warn on it, but maybe we
2316*4882a593Smuzhiyun * should try to send the command again with different params.
2317*4882a593Smuzhiyun */
2318*4882a593Smuzhiyun IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
2319*4882a593Smuzhiyun iwl_mvm_resume_tcm(mvm);
2320*4882a593Smuzhiyun return ret;
2321*4882a593Smuzhiyun }
2322*4882a593Smuzhiyun
2323*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
2324*4882a593Smuzhiyun mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
2325*4882a593Smuzhiyun mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun schedule_delayed_work(&mvm->scan_timeout_dwork,
2328*4882a593Smuzhiyun msecs_to_jiffies(SCAN_TIMEOUT));
2329*4882a593Smuzhiyun
2330*4882a593Smuzhiyun return 0;
2331*4882a593Smuzhiyun }
2332*4882a593Smuzhiyun
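/* Start a scheduled scan (or net-detect scan, depending on 'type'):
 * similar to the regular scan path, but also configures the match-set
 * profiles and clamps the scan delay to 16 bits.
 */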
2333*4882a593Smuzhiyun int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
2334*4882a593Smuzhiyun struct ieee80211_vif *vif,
2335*4882a593Smuzhiyun struct cfg80211_sched_scan_request *req,
2336*4882a593Smuzhiyun struct ieee80211_scan_ies *ies,
2337*4882a593Smuzhiyun int type)
2338*4882a593Smuzhiyun {
2339*4882a593Smuzhiyun struct iwl_host_cmd hcmd = {
2340*4882a593Smuzhiyun .len = { iwl_mvm_scan_size(mvm), },
2341*4882a593Smuzhiyun .data = { mvm->scan_cmd, },
2342*4882a593Smuzhiyun .dataflags = { IWL_HCMD_DFL_NOCOPY, },
2343*4882a593Smuzhiyun };
2344*4882a593Smuzhiyun struct iwl_mvm_scan_params params = {};
2345*4882a593Smuzhiyun int ret;
2346*4882a593Smuzhiyun
2347*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2348*4882a593Smuzhiyun
2349*4882a593Smuzhiyun if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
2350*4882a593Smuzhiyun IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
2351*4882a593Smuzhiyun return -EBUSY;
2352*4882a593Smuzhiyun }
2353*4882a593Smuzhiyun
2354*4882a593Smuzhiyun ret = iwl_mvm_check_running_scans(mvm, type);
2355*4882a593Smuzhiyun if (ret)
2356*4882a593Smuzhiyun return ret;
2357*4882a593Smuzhiyun
2358*4882a593Smuzhiyun /* we should have failed registration if scan_cmd was NULL */
2359*4882a593Smuzhiyun if (WARN_ON(!mvm->scan_cmd))
2360*4882a593Smuzhiyun return -ENOMEM;
2361*4882a593Smuzhiyun
2362*4882a593Smuzhiyun if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
2363*4882a593Smuzhiyun return -ENOBUFS;
2364*4882a593Smuzhiyun
2365*4882a593Smuzhiyun params.n_ssids = req->n_ssids;
2366*4882a593Smuzhiyun params.flags = req->flags;
2367*4882a593Smuzhiyun params.n_channels = req->n_channels;
2368*4882a593Smuzhiyun params.ssids = req->ssids;
2369*4882a593Smuzhiyun params.channels = req->channels;
2370*4882a593Smuzhiyun params.mac_addr = req->mac_addr;
2371*4882a593Smuzhiyun params.mac_addr_mask = req->mac_addr_mask;
2372*4882a593Smuzhiyun params.no_cck = false;
2373*4882a593Smuzhiyun params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
2374*4882a593Smuzhiyun params.n_match_sets = req->n_match_sets;
2375*4882a593Smuzhiyun params.match_sets = req->match_sets;
2376*4882a593Smuzhiyun if (!req->n_scan_plans)
2377*4882a593Smuzhiyun return -EINVAL;
2378*4882a593Smuzhiyun
2379*4882a593Smuzhiyun params.n_scan_plans = req->n_scan_plans;
2380*4882a593Smuzhiyun params.scan_plans = req->scan_plans;
2381*4882a593Smuzhiyun
2382*4882a593Smuzhiyun 	iwl_mvm_fill_scan_type(mvm, &params, vif);
2383*4882a593Smuzhiyun
2384*4882a593Smuzhiyun /* In theory, LMAC scans can handle a 32-bit delay, but since
2385*4882a593Smuzhiyun * waiting for over 18 hours to start the scan is a bit silly
2386*4882a593Smuzhiyun * and to keep it aligned with UMAC scans (which only support
2387*4882a593Smuzhiyun * 16-bit delays), trim it down to 16-bits.
2388*4882a593Smuzhiyun */
2389*4882a593Smuzhiyun if (req->delay > U16_MAX) {
2390*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm,
2391*4882a593Smuzhiyun "delay value is > 16-bits, set to max possible\n");
2392*4882a593Smuzhiyun params.delay = U16_MAX;
2393*4882a593Smuzhiyun } else {
2394*4882a593Smuzhiyun params.delay = req->delay;
2395*4882a593Smuzhiyun }
2396*4882a593Smuzhiyun
2397*4882a593Smuzhiyun ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
2398*4882a593Smuzhiyun if (ret)
2399*4882a593Smuzhiyun return ret;
2400*4882a593Smuzhiyun
2401*4882a593Smuzhiyun 	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 	ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
2404*4882a593Smuzhiyun
2405*4882a593Smuzhiyun if (ret)
2406*4882a593Smuzhiyun return ret;
2407*4882a593Smuzhiyun
2408*4882a593Smuzhiyun ret = iwl_mvm_send_cmd(mvm, &hcmd);
2409*4882a593Smuzhiyun if (!ret) {
2410*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm,
2411*4882a593Smuzhiyun "Sched scan request was sent successfully\n");
2412*4882a593Smuzhiyun mvm->scan_status |= type;
2413*4882a593Smuzhiyun } else {
2414*4882a593Smuzhiyun /* If the scan failed, it usually means that the FW was unable
2415*4882a593Smuzhiyun * to allocate the time events. Warn on it, but maybe we
2416*4882a593Smuzhiyun * should try to send the command again with different params.
2417*4882a593Smuzhiyun */
2418*4882a593Smuzhiyun IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
2419*4882a593Smuzhiyun }
2420*4882a593Smuzhiyun
2421*4882a593Smuzhiyun return ret;
2422*4882a593Smuzhiyun }
2423*4882a593Smuzhiyun
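/* Handle the UMAC scan-complete notification: report completion (or
 * abort) to mac80211 for the scan identified by the UID, then clear the
 * driver's scan_status/scan_uid_status bookkeeping and EBS state.
 */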
2424*4882a593Smuzhiyun void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
2425*4882a593Smuzhiyun struct iwl_rx_cmd_buffer *rxb)
2426*4882a593Smuzhiyun {
2427*4882a593Smuzhiyun struct iwl_rx_packet *pkt = rxb_addr(rxb);
2428*4882a593Smuzhiyun struct iwl_umac_scan_complete *notif = (void *)pkt->data;
2429*4882a593Smuzhiyun u32 uid = __le32_to_cpu(notif->uid);
2430*4882a593Smuzhiyun bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
2431*4882a593Smuzhiyun
2432*4882a593Smuzhiyun if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
2433*4882a593Smuzhiyun return;
2434*4882a593Smuzhiyun
2435*4882a593Smuzhiyun /* if the scan is already stopping, we don't need to notify mac80211 */
2436*4882a593Smuzhiyun if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
2437*4882a593Smuzhiyun struct cfg80211_scan_info info = {
2438*4882a593Smuzhiyun .aborted = aborted,
2439*4882a593Smuzhiyun .scan_start_tsf = mvm->scan_start,
2440*4882a593Smuzhiyun };
2441*4882a593Smuzhiyun
2442*4882a593Smuzhiyun memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN);
2443*4882a593Smuzhiyun ieee80211_scan_completed(mvm->hw, &info);
2444*4882a593Smuzhiyun mvm->scan_vif = NULL;
2445*4882a593Smuzhiyun cancel_delayed_work(&mvm->scan_timeout_dwork);
2446*4882a593Smuzhiyun iwl_mvm_resume_tcm(mvm);
2447*4882a593Smuzhiyun } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
2448*4882a593Smuzhiyun ieee80211_sched_scan_stopped(mvm->hw);
2449*4882a593Smuzhiyun mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
2450*4882a593Smuzhiyun }
2451*4882a593Smuzhiyun
2452*4882a593Smuzhiyun mvm->scan_status &= ~mvm->scan_uid_status[uid];
2453*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm,
2454*4882a593Smuzhiyun "Scan completed, uid %u type %u, status %s, EBS status %s\n",
2455*4882a593Smuzhiyun uid, mvm->scan_uid_status[uid],
2456*4882a593Smuzhiyun notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
2457*4882a593Smuzhiyun "completed" : "aborted",
2458*4882a593Smuzhiyun iwl_mvm_ebs_status_str(notif->ebs_status));
2459*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm,
2460*4882a593Smuzhiyun "Last line %d, Last iteration %d, Time from last iteration %d\n",
2461*4882a593Smuzhiyun notif->last_schedule, notif->last_iter,
2462*4882a593Smuzhiyun __le32_to_cpu(notif->time_from_last_iter));
2463*4882a593Smuzhiyun
2464*4882a593Smuzhiyun if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
2465*4882a593Smuzhiyun notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
2466*4882a593Smuzhiyun mvm->last_ebs_successful = false;
2467*4882a593Smuzhiyun
2468*4882a593Smuzhiyun mvm->scan_uid_status[uid] = 0;
2469*4882a593Smuzhiyun }
2470*4882a593Smuzhiyun
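/* Handle the UMAC per-iteration notification: record the iteration start
 * TSF and, when running in pass-all mode with results pending, forward
 * the scheduled-scan results to mac80211.
 */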
2471*4882a593Smuzhiyun void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
2472*4882a593Smuzhiyun struct iwl_rx_cmd_buffer *rxb)
2473*4882a593Smuzhiyun {
2474*4882a593Smuzhiyun struct iwl_rx_packet *pkt = rxb_addr(rxb);
2475*4882a593Smuzhiyun struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
2476*4882a593Smuzhiyun
2477*4882a593Smuzhiyun mvm->scan_start = le64_to_cpu(notif->start_tsf);
2478*4882a593Smuzhiyun
2479*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm,
2480*4882a593Smuzhiyun "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
2481*4882a593Smuzhiyun notif->status, notif->scanned_channels);
2482*4882a593Smuzhiyun
2483*4882a593Smuzhiyun if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
2484*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
2485*4882a593Smuzhiyun ieee80211_sched_scan_results(mvm->hw);
2486*4882a593Smuzhiyun mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
2487*4882a593Smuzhiyun }
2488*4882a593Smuzhiyun
2489*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm,
2490*4882a593Smuzhiyun "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
2491*4882a593Smuzhiyun mvm->scan_start);
2492*4882a593Smuzhiyun }
2493*4882a593Smuzhiyun
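/* Send SCAN_ABORT_UMAC for the UID that matches the given scan type and,
 * on success, mark that UID as stopping.
 */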
2494*4882a593Smuzhiyun static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
2495*4882a593Smuzhiyun {
2496*4882a593Smuzhiyun struct iwl_umac_scan_abort cmd = {};
2497*4882a593Smuzhiyun int uid, ret;
2498*4882a593Smuzhiyun
2499*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2500*4882a593Smuzhiyun
2501*4882a593Smuzhiyun /* We should always get a valid index here, because we already
2502*4882a593Smuzhiyun * checked that this type of scan was running in the generic
2503*4882a593Smuzhiyun * code.
2504*4882a593Smuzhiyun */
2505*4882a593Smuzhiyun uid = iwl_mvm_scan_uid_by_status(mvm, type);
2506*4882a593Smuzhiyun if (WARN_ON_ONCE(uid < 0))
2507*4882a593Smuzhiyun return uid;
2508*4882a593Smuzhiyun
2509*4882a593Smuzhiyun cmd.uid = cpu_to_le32(uid);
2510*4882a593Smuzhiyun
2511*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
2512*4882a593Smuzhiyun
2513*4882a593Smuzhiyun ret = iwl_mvm_send_cmd_pdu(mvm,
2514*4882a593Smuzhiyun iwl_cmd_id(SCAN_ABORT_UMAC,
2515*4882a593Smuzhiyun IWL_ALWAYS_LONG_GROUP, 0),
2516*4882a593Smuzhiyun 0, sizeof(cmd), &cmd);
2517*4882a593Smuzhiyun if (!ret)
2518*4882a593Smuzhiyun mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
2519*4882a593Smuzhiyun
2520*4882a593Smuzhiyun return ret;
2521*4882a593Smuzhiyun }
2522*4882a593Smuzhiyun
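/* Abort a scan and wait (up to one second) for the firmware to confirm
 * with a scan-complete notification, using either the UMAC or the LMAC
 * abort path depending on firmware capabilities.
 */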
2523*4882a593Smuzhiyun static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
2524*4882a593Smuzhiyun {
2525*4882a593Smuzhiyun struct iwl_notification_wait wait_scan_done;
2526*4882a593Smuzhiyun static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
2527*4882a593Smuzhiyun SCAN_OFFLOAD_COMPLETE, };
2528*4882a593Smuzhiyun int ret;
2529*4882a593Smuzhiyun
2530*4882a593Smuzhiyun lockdep_assert_held(&mvm->mutex);
2531*4882a593Smuzhiyun
2532*4882a593Smuzhiyun iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
2533*4882a593Smuzhiyun scan_done_notif,
2534*4882a593Smuzhiyun ARRAY_SIZE(scan_done_notif),
2535*4882a593Smuzhiyun NULL, NULL);
2536*4882a593Smuzhiyun
2537*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
2538*4882a593Smuzhiyun
2539*4882a593Smuzhiyun if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2540*4882a593Smuzhiyun ret = iwl_mvm_umac_scan_abort(mvm, type);
2541*4882a593Smuzhiyun else
2542*4882a593Smuzhiyun ret = iwl_mvm_lmac_scan_abort(mvm);
2543*4882a593Smuzhiyun
2544*4882a593Smuzhiyun if (ret) {
2545*4882a593Smuzhiyun IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
2546*4882a593Smuzhiyun iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
2547*4882a593Smuzhiyun return ret;
2548*4882a593Smuzhiyun }
2549*4882a593Smuzhiyun
2550*4882a593Smuzhiyun return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
2551*4882a593Smuzhiyun 1 * HZ);
2552*4882a593Smuzhiyun }
2553*4882a593Smuzhiyun
2554*4882a593Smuzhiyun #define IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) { \
2555*4882a593Smuzhiyun case (_ver): return sizeof(struct iwl_scan_req_umac_v##_ver); \
2556*4882a593Smuzhiyun }
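/* For example, IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14) expands to
 * { case (14): return sizeof(struct iwl_scan_req_umac_v14); }.
 */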
2557*4882a593Smuzhiyun
2558*4882a593Smuzhiyun static int iwl_scan_req_umac_get_size(u8 scan_ver)
2559*4882a593Smuzhiyun {
2560*4882a593Smuzhiyun switch (scan_ver) {
2561*4882a593Smuzhiyun IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14);
2562*4882a593Smuzhiyun IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12);
2563*4882a593Smuzhiyun }
2564*4882a593Smuzhiyun
2565*4882a593Smuzhiyun return 0;
2566*4882a593Smuzhiyun }
2567*4882a593Smuzhiyun
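/* Worst-case size of the scan command buffer: either the size of the
 * versioned UMAC request struct when the firmware reports a known
 * SCAN_REQ_UMAC version, or a capability-based base size plus the
 * per-channel configs and the tail/probe structures.
 */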
2568*4882a593Smuzhiyun int iwl_mvm_scan_size(struct iwl_mvm *mvm)
2569*4882a593Smuzhiyun {
2570*4882a593Smuzhiyun int base_size, tail_size;
2571*4882a593Smuzhiyun u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
2572*4882a593Smuzhiyun SCAN_REQ_UMAC,
2573*4882a593Smuzhiyun IWL_FW_CMD_VER_UNKNOWN);
2574*4882a593Smuzhiyun
2575*4882a593Smuzhiyun base_size = iwl_scan_req_umac_get_size(scan_ver);
2576*4882a593Smuzhiyun if (base_size)
2577*4882a593Smuzhiyun return base_size;
2578*4882a593Smuzhiyun
2579*4882a593Smuzhiyun
2580*4882a593Smuzhiyun if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
2581*4882a593Smuzhiyun base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
2582*4882a593Smuzhiyun else if (iwl_mvm_is_adaptive_dwell_supported(mvm))
2583*4882a593Smuzhiyun base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
2584*4882a593Smuzhiyun else if (iwl_mvm_cdb_scan_api(mvm))
2585*4882a593Smuzhiyun base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
2586*4882a593Smuzhiyun else
2587*4882a593Smuzhiyun base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
2588*4882a593Smuzhiyun
2589*4882a593Smuzhiyun if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2590*4882a593Smuzhiyun if (iwl_mvm_is_scan_ext_chan_supported(mvm))
2591*4882a593Smuzhiyun tail_size = sizeof(struct iwl_scan_req_umac_tail_v2);
2592*4882a593Smuzhiyun else
2593*4882a593Smuzhiyun tail_size = sizeof(struct iwl_scan_req_umac_tail_v1);
2594*4882a593Smuzhiyun
2595*4882a593Smuzhiyun return base_size +
2596*4882a593Smuzhiyun sizeof(struct iwl_scan_channel_cfg_umac) *
2597*4882a593Smuzhiyun mvm->fw->ucode_capa.n_scan_channels +
2598*4882a593Smuzhiyun tail_size;
2599*4882a593Smuzhiyun }
2600*4882a593Smuzhiyun return sizeof(struct iwl_scan_req_lmac) +
2601*4882a593Smuzhiyun sizeof(struct iwl_scan_channel_cfg_lmac) *
2602*4882a593Smuzhiyun mvm->fw->ucode_capa.n_scan_channels +
2603*4882a593Smuzhiyun sizeof(struct iwl_scan_probe_req_v1);
2604*4882a593Smuzhiyun }
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun /*
2607*4882a593Smuzhiyun  * This function is used in the NIC restart flow, to inform mac80211 about
2608*4882a593Smuzhiyun  * scans that were aborted by the restart flow or by a firmware assert.
2609*4882a593Smuzhiyun */
2610*4882a593Smuzhiyun void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
2611*4882a593Smuzhiyun {
2612*4882a593Smuzhiyun if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2613*4882a593Smuzhiyun int uid, i;
2614*4882a593Smuzhiyun
2615*4882a593Smuzhiyun uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
2616*4882a593Smuzhiyun if (uid >= 0) {
2617*4882a593Smuzhiyun struct cfg80211_scan_info info = {
2618*4882a593Smuzhiyun .aborted = true,
2619*4882a593Smuzhiyun };
2620*4882a593Smuzhiyun
2621*4882a593Smuzhiyun ieee80211_scan_completed(mvm->hw, &info);
2622*4882a593Smuzhiyun mvm->scan_uid_status[uid] = 0;
2623*4882a593Smuzhiyun }
2624*4882a593Smuzhiyun uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
2625*4882a593Smuzhiyun if (uid >= 0 && !mvm->fw_restart) {
2626*4882a593Smuzhiyun ieee80211_sched_scan_stopped(mvm->hw);
2627*4882a593Smuzhiyun mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
2628*4882a593Smuzhiyun mvm->scan_uid_status[uid] = 0;
2629*4882a593Smuzhiyun }
2630*4882a593Smuzhiyun uid = iwl_mvm_scan_uid_by_status(mvm,
2631*4882a593Smuzhiyun IWL_MVM_SCAN_STOPPING_REGULAR);
2632*4882a593Smuzhiyun if (uid >= 0)
2633*4882a593Smuzhiyun mvm->scan_uid_status[uid] = 0;
2634*4882a593Smuzhiyun
2635*4882a593Smuzhiyun uid = iwl_mvm_scan_uid_by_status(mvm,
2636*4882a593Smuzhiyun IWL_MVM_SCAN_STOPPING_SCHED);
2637*4882a593Smuzhiyun if (uid >= 0)
2638*4882a593Smuzhiyun mvm->scan_uid_status[uid] = 0;
2639*4882a593Smuzhiyun
2640*4882a593Smuzhiyun /* We shouldn't have any UIDs still set. Loop over all the
2641*4882a593Smuzhiyun * UIDs to make sure there's nothing left there and warn if
2642*4882a593Smuzhiyun * any is found.
2643*4882a593Smuzhiyun */
2644*4882a593Smuzhiyun for (i = 0; i < mvm->max_scans; i++) {
2645*4882a593Smuzhiyun if (WARN_ONCE(mvm->scan_uid_status[i],
2646*4882a593Smuzhiyun "UMAC scan UID %d status was not cleaned\n",
2647*4882a593Smuzhiyun i))
2648*4882a593Smuzhiyun mvm->scan_uid_status[i] = 0;
2649*4882a593Smuzhiyun }
2650*4882a593Smuzhiyun } else {
2651*4882a593Smuzhiyun if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
2652*4882a593Smuzhiyun struct cfg80211_scan_info info = {
2653*4882a593Smuzhiyun .aborted = true,
2654*4882a593Smuzhiyun };
2655*4882a593Smuzhiyun
2656*4882a593Smuzhiyun ieee80211_scan_completed(mvm->hw, &info);
2657*4882a593Smuzhiyun }
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun /* Sched scan will be restarted by mac80211 in
2660*4882a593Smuzhiyun * restart_hw, so do not report if FW is about to be
2661*4882a593Smuzhiyun * restarted.
2662*4882a593Smuzhiyun */
2663*4882a593Smuzhiyun if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
2664*4882a593Smuzhiyun !mvm->fw_restart) {
2665*4882a593Smuzhiyun ieee80211_sched_scan_stopped(mvm->hw);
2666*4882a593Smuzhiyun mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
2667*4882a593Smuzhiyun }
2668*4882a593Smuzhiyun }
2669*4882a593Smuzhiyun }
2670*4882a593Smuzhiyun
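/* Stop a running scan of the given type and optionally notify mac80211.
 * If the radio is killed, the firmware abort is skipped and only the
 * driver state is cleaned up.
 */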
2671*4882a593Smuzhiyun int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
2672*4882a593Smuzhiyun {
2673*4882a593Smuzhiyun int ret;
2674*4882a593Smuzhiyun
2675*4882a593Smuzhiyun if (!(mvm->scan_status & type))
2676*4882a593Smuzhiyun return 0;
2677*4882a593Smuzhiyun
2678*4882a593Smuzhiyun if (iwl_mvm_is_radio_killed(mvm)) {
2679*4882a593Smuzhiyun ret = 0;
2680*4882a593Smuzhiyun goto out;
2681*4882a593Smuzhiyun }
2682*4882a593Smuzhiyun
2683*4882a593Smuzhiyun ret = iwl_mvm_scan_stop_wait(mvm, type);
2684*4882a593Smuzhiyun if (!ret)
2685*4882a593Smuzhiyun mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
2686*4882a593Smuzhiyun out:
2687*4882a593Smuzhiyun /* Clear the scan status so the next scan requests will
2688*4882a593Smuzhiyun * succeed and mark the scan as stopping, so that the Rx
2689*4882a593Smuzhiyun * handler doesn't do anything, as the scan was stopped from
2690*4882a593Smuzhiyun * above.
2691*4882a593Smuzhiyun */
2692*4882a593Smuzhiyun mvm->scan_status &= ~type;
2693*4882a593Smuzhiyun
2694*4882a593Smuzhiyun if (type == IWL_MVM_SCAN_REGULAR) {
2695*4882a593Smuzhiyun cancel_delayed_work(&mvm->scan_timeout_dwork);
2696*4882a593Smuzhiyun if (notify) {
2697*4882a593Smuzhiyun struct cfg80211_scan_info info = {
2698*4882a593Smuzhiyun .aborted = true,
2699*4882a593Smuzhiyun };
2700*4882a593Smuzhiyun
2701*4882a593Smuzhiyun ieee80211_scan_completed(mvm->hw, &info);
2702*4882a593Smuzhiyun }
2703*4882a593Smuzhiyun } else if (notify) {
2704*4882a593Smuzhiyun ieee80211_sched_scan_stopped(mvm->hw);
2705*4882a593Smuzhiyun mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun
2708*4882a593Smuzhiyun return ret;
2709*4882a593Smuzhiyun }
2710