1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /******************************************************************************
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
5*4882a593Smuzhiyun * Copyright(c) 2015 Intel Deutschland GmbH
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Contact Information:
8*4882a593Smuzhiyun * Intel Linux Wireless <linuxwifi@intel.com>
9*4882a593Smuzhiyun * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun *****************************************************************************/
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <linux/etherdevice.h>
14*4882a593Smuzhiyun #include "iwl-trans.h"
15*4882a593Smuzhiyun #include "iwl-modparams.h"
16*4882a593Smuzhiyun #include "dev.h"
17*4882a593Smuzhiyun #include "agn.h"
18*4882a593Smuzhiyun #include "calib.h"
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun /*
21*4882a593Smuzhiyun * initialize rxon structure with default values from eeprom
22*4882a593Smuzhiyun */
iwl_connection_init_rx_config(struct iwl_priv * priv,struct iwl_rxon_context * ctx)23*4882a593Smuzhiyun void iwl_connection_init_rx_config(struct iwl_priv *priv,
24*4882a593Smuzhiyun struct iwl_rxon_context *ctx)
25*4882a593Smuzhiyun {
26*4882a593Smuzhiyun memset(&ctx->staging, 0, sizeof(ctx->staging));
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun if (!ctx->vif) {
29*4882a593Smuzhiyun ctx->staging.dev_type = ctx->unused_devtype;
30*4882a593Smuzhiyun } else
31*4882a593Smuzhiyun switch (ctx->vif->type) {
32*4882a593Smuzhiyun case NL80211_IFTYPE_AP:
33*4882a593Smuzhiyun ctx->staging.dev_type = ctx->ap_devtype;
34*4882a593Smuzhiyun break;
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun case NL80211_IFTYPE_STATION:
37*4882a593Smuzhiyun ctx->staging.dev_type = ctx->station_devtype;
38*4882a593Smuzhiyun ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
39*4882a593Smuzhiyun break;
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun case NL80211_IFTYPE_ADHOC:
42*4882a593Smuzhiyun ctx->staging.dev_type = ctx->ibss_devtype;
43*4882a593Smuzhiyun ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
44*4882a593Smuzhiyun ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
45*4882a593Smuzhiyun RXON_FILTER_ACCEPT_GRP_MSK;
46*4882a593Smuzhiyun break;
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun case NL80211_IFTYPE_MONITOR:
49*4882a593Smuzhiyun ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER;
50*4882a593Smuzhiyun break;
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun default:
53*4882a593Smuzhiyun IWL_ERR(priv, "Unsupported interface type %d\n",
54*4882a593Smuzhiyun ctx->vif->type);
55*4882a593Smuzhiyun break;
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun #if 0
59*4882a593Smuzhiyun /* TODO: Figure out when short_preamble would be set and cache from
60*4882a593Smuzhiyun * that */
61*4882a593Smuzhiyun if (!hw_to_local(priv->hw)->short_preamble)
62*4882a593Smuzhiyun ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
63*4882a593Smuzhiyun else
64*4882a593Smuzhiyun ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
65*4882a593Smuzhiyun #endif
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun ctx->staging.channel =
68*4882a593Smuzhiyun cpu_to_le16(priv->hw->conf.chandef.chan->hw_value);
69*4882a593Smuzhiyun priv->band = priv->hw->conf.chandef.chan->band;
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun /* clear both MIX and PURE40 mode flag */
74*4882a593Smuzhiyun ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
75*4882a593Smuzhiyun RXON_FLG_CHANNEL_MODE_PURE_40);
76*4882a593Smuzhiyun if (ctx->vif)
77*4882a593Smuzhiyun memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
80*4882a593Smuzhiyun ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
81*4882a593Smuzhiyun ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun
iwlagn_disable_bss(struct iwl_priv * priv,struct iwl_rxon_context * ctx,struct iwl_rxon_cmd * send)84*4882a593Smuzhiyun static int iwlagn_disable_bss(struct iwl_priv *priv,
85*4882a593Smuzhiyun struct iwl_rxon_context *ctx,
86*4882a593Smuzhiyun struct iwl_rxon_cmd *send)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun __le32 old_filter = send->filter_flags;
89*4882a593Smuzhiyun int ret;
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
92*4882a593Smuzhiyun ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
93*4882a593Smuzhiyun 0, sizeof(*send), send);
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun send->filter_flags = old_filter;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun if (ret)
98*4882a593Smuzhiyun IWL_DEBUG_QUIET_RFKILL(priv,
99*4882a593Smuzhiyun "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun return ret;
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun
/*
 * Deactivate the PAN context: send an RXON with the ASSOC filter
 * cleared and the device type forced to P2P, then wait for the
 * firmware's deactivation-complete notification.  The caller's staging
 * command is restored to its original contents before returning.
 */
static int iwlagn_disable_pan(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx,
			      struct iwl_rxon_cmd *send)
{
	struct iwl_notification_wait disable_wait;
	__le32 old_filter = send->filter_flags;
	u8 old_dev_type = send->dev_type;
	int ret;
	static const u16 deactivate_cmd[] = {
		REPLY_WIPAN_DEACTIVATION_COMPLETE
	};

	/* Register the waiter before sending the command so the
	 * completion notification cannot be missed. */
	iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
				   deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
				   NULL, NULL);

	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	send->dev_type = RXON_DEV_TYPE_P2P;
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
				   0, sizeof(*send), send);

	/* Restore the caller's view of the command regardless of outcome */
	send->filter_flags = old_filter;
	send->dev_type = old_dev_type;

	if (ret) {
		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
		/* Command never reached the firmware: drop the waiter */
		iwl_remove_notification(&priv->notif_wait, &disable_wait);
	} else {
		/* Wait up to one second for the firmware to confirm */
		ret = iwl_wait_notification(&priv->notif_wait,
					    &disable_wait, HZ);
		if (ret)
			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
	}

	return ret;
}
140*4882a593Smuzhiyun
iwlagn_disconn_pan(struct iwl_priv * priv,struct iwl_rxon_context * ctx,struct iwl_rxon_cmd * send)141*4882a593Smuzhiyun static int iwlagn_disconn_pan(struct iwl_priv *priv,
142*4882a593Smuzhiyun struct iwl_rxon_context *ctx,
143*4882a593Smuzhiyun struct iwl_rxon_cmd *send)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun __le32 old_filter = send->filter_flags;
146*4882a593Smuzhiyun int ret;
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
149*4882a593Smuzhiyun ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
150*4882a593Smuzhiyun sizeof(*send), send);
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun send->filter_flags = old_filter;
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun return ret;
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun
iwlagn_update_qos(struct iwl_priv * priv,struct iwl_rxon_context * ctx)157*4882a593Smuzhiyun static void iwlagn_update_qos(struct iwl_priv *priv,
158*4882a593Smuzhiyun struct iwl_rxon_context *ctx)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun int ret;
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun if (!ctx->is_active)
163*4882a593Smuzhiyun return;
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun ctx->qos_data.def_qos_parm.qos_flags = 0;
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun if (ctx->qos_data.qos_active)
168*4882a593Smuzhiyun ctx->qos_data.def_qos_parm.qos_flags |=
169*4882a593Smuzhiyun QOS_PARAM_FLG_UPDATE_EDCA_MSK;
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun if (ctx->ht.enabled)
172*4882a593Smuzhiyun ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
175*4882a593Smuzhiyun ctx->qos_data.qos_active,
176*4882a593Smuzhiyun ctx->qos_data.def_qos_parm.qos_flags);
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
179*4882a593Smuzhiyun sizeof(struct iwl_qosparam_cmd),
180*4882a593Smuzhiyun &ctx->qos_data.def_qos_parm);
181*4882a593Smuzhiyun if (ret)
182*4882a593Smuzhiyun IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun
iwlagn_update_beacon(struct iwl_priv * priv,struct ieee80211_vif * vif)185*4882a593Smuzhiyun static int iwlagn_update_beacon(struct iwl_priv *priv,
186*4882a593Smuzhiyun struct ieee80211_vif *vif)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun lockdep_assert_held(&priv->mutex);
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun dev_kfree_skb(priv->beacon_skb);
191*4882a593Smuzhiyun priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
192*4882a593Smuzhiyun if (!priv->beacon_skb)
193*4882a593Smuzhiyun return -ENOMEM;
194*4882a593Smuzhiyun return iwlagn_send_beacon_cmd(priv);
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun
iwlagn_send_rxon_assoc(struct iwl_priv * priv,struct iwl_rxon_context * ctx)197*4882a593Smuzhiyun static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
198*4882a593Smuzhiyun struct iwl_rxon_context *ctx)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun int ret = 0;
201*4882a593Smuzhiyun struct iwl_rxon_assoc_cmd rxon_assoc;
202*4882a593Smuzhiyun const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
203*4882a593Smuzhiyun const struct iwl_rxon_cmd *rxon2 = &ctx->active;
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun if ((rxon1->flags == rxon2->flags) &&
206*4882a593Smuzhiyun (rxon1->filter_flags == rxon2->filter_flags) &&
207*4882a593Smuzhiyun (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
208*4882a593Smuzhiyun (rxon1->ofdm_ht_single_stream_basic_rates ==
209*4882a593Smuzhiyun rxon2->ofdm_ht_single_stream_basic_rates) &&
210*4882a593Smuzhiyun (rxon1->ofdm_ht_dual_stream_basic_rates ==
211*4882a593Smuzhiyun rxon2->ofdm_ht_dual_stream_basic_rates) &&
212*4882a593Smuzhiyun (rxon1->ofdm_ht_triple_stream_basic_rates ==
213*4882a593Smuzhiyun rxon2->ofdm_ht_triple_stream_basic_rates) &&
214*4882a593Smuzhiyun (rxon1->acquisition_data == rxon2->acquisition_data) &&
215*4882a593Smuzhiyun (rxon1->rx_chain == rxon2->rx_chain) &&
216*4882a593Smuzhiyun (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
217*4882a593Smuzhiyun IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
218*4882a593Smuzhiyun return 0;
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun rxon_assoc.flags = ctx->staging.flags;
222*4882a593Smuzhiyun rxon_assoc.filter_flags = ctx->staging.filter_flags;
223*4882a593Smuzhiyun rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
224*4882a593Smuzhiyun rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
225*4882a593Smuzhiyun rxon_assoc.reserved1 = 0;
226*4882a593Smuzhiyun rxon_assoc.reserved2 = 0;
227*4882a593Smuzhiyun rxon_assoc.reserved3 = 0;
228*4882a593Smuzhiyun rxon_assoc.ofdm_ht_single_stream_basic_rates =
229*4882a593Smuzhiyun ctx->staging.ofdm_ht_single_stream_basic_rates;
230*4882a593Smuzhiyun rxon_assoc.ofdm_ht_dual_stream_basic_rates =
231*4882a593Smuzhiyun ctx->staging.ofdm_ht_dual_stream_basic_rates;
232*4882a593Smuzhiyun rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
233*4882a593Smuzhiyun rxon_assoc.ofdm_ht_triple_stream_basic_rates =
234*4882a593Smuzhiyun ctx->staging.ofdm_ht_triple_stream_basic_rates;
235*4882a593Smuzhiyun rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
238*4882a593Smuzhiyun CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
239*4882a593Smuzhiyun return ret;
240*4882a593Smuzhiyun }
241*4882a593Smuzhiyun
/*
 * Clamp a beacon interval to what the device supports.  Intervals
 * larger than @max_beacon_val are divided down by the smallest integer
 * factor that brings them into range (e.g. 7000 with a cap of 4096
 * becomes 3500), so we still wake on beacon boundaries.  A zero input
 * means mac80211 gave us nothing and yields the default interval.
 */
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 factor;
	u16 adjusted;

	/*
	 * Without checking for zero here the division below would
	 * return the maximum value, which may break PAN.
	 */
	if (beacon_val == 0)
		return DEFAULT_BEACON_INTERVAL;

	/*
	 * Too-large intervals make us wake more often (and beacon too
	 * much in IBSS), and may skew beacon detection statistics.
	 */
	factor = (beacon_val + max_beacon_val) / max_beacon_val;
	adjusted = beacon_val / factor;

	return adjusted ? adjusted : max_beacon_val;
}
276*4882a593Smuzhiyun
/*
 * Build and send the RXON timing command (beacon interval, DTIM period,
 * initial beacon timer) for the given context.  When both BSS and PAN
 * contexts are in use, one context inherits the other's beacon interval
 * so the firmware can time-share the radio between them.
 */
static int iwl_send_rxon_timing(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &priv->hw->conf;

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		/* Non-station PAN while BSS is associated: inherit the
		 * BSS beacon interval so the two stay aligned. */
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		/* BSS with no beacon interval of its own while PAN is
		 * associated: inherit the PAN beacon interval instead. */
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		/* No other context to sync with: clamp the requested
		 * interval to what the uCode supports. */
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	ctx->beacon_int = beacon_int;

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	/* Time remaining until the next beacon boundary seeds the timer */
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* A DTIM period of 0 is invalid: default to every beacon */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				    0, sizeof(ctx->timing), &ctx->timing);
}
344*4882a593Smuzhiyun
/*
 * Drop the association in the given context by sending an un-assoc
 * RXON, then rebuild the uCode station table and WEP keys that the
 * un-assoc RXON implicitly wiped.  On success the active RXON copy is
 * updated to match the staging copy that was committed.
 */
static int iwlagn_rxon_disconn(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
	} else {
		ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
		if (ret)
			return ret;
		if (ctx->vif) {
			/* Refresh timing before the PAN disconnect RXON */
			ret = iwl_send_rxon_timing(priv, ctx);
			if (ret) {
				IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
				return ret;
			}
			ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
		}
	}
	if (ret)
		return ret;

	/*
	 * Un-assoc RXON clears the station table and WEP
	 * keys, so we have to restore those afterwards.
	 */
	iwl_clear_ucode_stations(priv, ctx);
	/* update -- might need P2P now */
	iwl_update_bcast_station(priv, ctx);
	iwl_restore_stations(priv, ctx);
	ret = iwl_restore_default_wep_keys(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
		return ret;
	}

	/* The device now runs the staging config: record it as active */
	memcpy(active, &ctx->staging, sizeof(*active));
	return 0;
}
386*4882a593Smuzhiyun
/*
 * Set the user TX power limit (in dBm) and push it to the device.
 *
 * @tx_power: requested limit
 * @force: send even when the value is unchanged and even while a scan
 *	or uncommitted RXON change would normally defer the update
 *	(used right after committing a new RXON)
 *
 * Returns 0 on success or when the update was deferred/skipped, a
 * negative error code on invalid input or send failure.
 */
static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	/* TX power calibration is disabled: nothing to program */
	if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
		return 0;

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	/* max_tx_pwr_half_dbm is in half-dBm units, hence the rounding */
	if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->nvm_data->max_tx_pwr_half_dbm);
		return -EINVAL;
	}

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = iwlagn_send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
444*4882a593Smuzhiyun
/*
 * Commit the staging RXON as an associated configuration.  The firmware
 * command ordering here is strict: RXON timing before the associated
 * RXON, an AP beacon before going assoc, an IBSS beacon after, and a
 * TXPOWER command after any RXON that may have retuned the channel.
 */
static int iwlagn_rxon_connect(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	int ret;
	struct iwl_rxon_cmd *active = (void *)&ctx->active;

	/* RXON timing must be before associated RXON */
	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
		ret = iwl_send_rxon_timing(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
			return ret;
		}
	}
	/* QoS info may be cleared by previous un-assoc RXON */
	iwlagn_update_qos(priv, ctx);

	/*
	 * We'll run into this code path when beaconing is
	 * enabled, but then we also need to send the beacon
	 * to the device.
	 */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
		ret = iwlagn_update_beacon(priv, ctx->vif);
		if (ret) {
			IWL_ERR(priv,
				"Error sending required beacon (%d)!\n",
				ret);
			return ret;
		}
	}

	priv->start_calib = 0;
	/*
	 * Apply the new configuration.
	 *
	 * Associated RXON doesn't clear the station table in uCode,
	 * so we don't need to restore stations etc. after this.
	 */
	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
		      sizeof(struct iwl_rxon_cmd), &ctx->staging);
	if (ret) {
		IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
		return ret;
	}
	/* Device accepted it: record staging as the active config */
	memcpy(active, &ctx->staging, sizeof(*active));

	/* IBSS beacon needs to be sent after setting assoc */
	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
		if (iwlagn_update_beacon(priv, ctx->vif))
			IWL_ERR(priv, "Error sending IBSS beacon\n");
	iwl_init_sensitivity(priv);

	/*
	 * If we issue a new RXON command which required a tune then
	 * we must send a new TXPOWER command or we won't be able to
	 * Tx any frames.
	 *
	 * It's expected we set power here if channel is changing.
	 */
	ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}
513*4882a593Smuzhiyun
/*
 * Program the WiPAN slot parameters that divide radio time between the
 * BSS (slot 0) and PAN (slot 1) contexts.  Slot widths are derived
 * from the beacon intervals and shifted heavily toward whichever
 * context is still scanning or associating.
 */
int iwlagn_set_pan_params(struct iwl_priv *priv)
{
	struct iwl_wipan_params_cmd cmd;
	struct iwl_rxon_context *ctx_bss, *ctx_pan;
	int slot0 = 300, slot1 = 0;
	int ret;

	/* Only the BSS context exists: no PAN parameters to program */
	if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
		return 0;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	lockdep_assert_held(&priv->mutex);

	ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
	ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];

	/*
	 * If the PAN context is inactive, then we don't need
	 * to update the PAN parameters, the last thing we'll
	 * have done before it goes inactive is making the PAN
	 * parameters be WLAN-only.
	 */
	if (!ctx_pan->is_active)
		return 0;

	memset(&cmd, 0, sizeof(cmd));

	/* only 2 slots are currently allowed */
	cmd.num_slots = 2;

	cmd.slots[0].type = 0; /* BSS */
	cmd.slots[1].type = 1; /* PAN */

	if (ctx_bss->vif && ctx_pan->vif) {
		/* Both interfaces up: split the shared beacon interval */
		int bcnint = ctx_pan->beacon_int;
		int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;

		/* should be set, but seems unused?? */
		cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);

		if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
		    bcnint &&
		    bcnint != ctx_bss->beacon_int) {
			IWL_ERR(priv,
				"beacon intervals don't match (%d, %d)\n",
				ctx_bss->beacon_int, ctx_pan->beacon_int);
		} else
			bcnint = max_t(int, bcnint,
				       ctx_bss->beacon_int);
		if (!bcnint)
			bcnint = DEFAULT_BEACON_INTERVAL;
		/* Default: split the interval evenly between the slots */
		slot0 = bcnint / 2;
		slot1 = bcnint - slot0;

		/*
		 * While scanning, or while a side is neither idle nor
		 * associated (i.e. still connecting), give that side
		 * nearly all the time and the other the minimum slot.
		 * NOTE(review): the "* 3" widening factor appears to be
		 * a tuning choice — confirm against firmware docs.
		 */
		if (test_bit(STATUS_SCAN_HW, &priv->status) ||
		    (!ctx_bss->vif->bss_conf.idle &&
		     !ctx_bss->vif->bss_conf.assoc)) {
			slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		} else if (!ctx_pan->vif->bss_conf.idle &&
			   !ctx_pan->vif->bss_conf.assoc) {
			slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
			slot0 = IWL_MIN_SLOT_TIME;
		}
	} else if (ctx_pan->vif) {
		/* PAN only: give it essentially all of the time */
		slot0 = 0;
		slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
					ctx_pan->beacon_int;
		slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);

		if (test_bit(STATUS_SCAN_HW, &priv->status)) {
			slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
			slot1 = IWL_MIN_SLOT_TIME;
		}
	}

	cmd.slots[0].width = cpu_to_le16(slot0);
	cmd.slots[1].width = cpu_to_le16(slot1);

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
				   sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);

	return ret;
}
601*4882a593Smuzhiyun
/*
 * Translate the current HT configuration (protection mode, channel
 * width, extension channel offset) into the staging RXON flags.
 * With HT disabled, all HT-related flags are simply cleared.
 */
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	/* FIXME: if the definition of ht.protection changed, the "translation"
	 * will be needed for rxon->flags
	 */
	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
				   RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection ==
		    IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/*
			 * Note: control channel is opposite of extension
			 * channel
			 */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				/* extension above -> control is low */
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				/* extension below -> control is high */
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/*
			 * Note: control channel is opposite of extension
			 * channel
			 */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/*
				 * channel location only valid if in Mixed
				 * mode
				 */
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		/* HT40 not allowed: fall back to legacy 20 MHz */
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	/* RX chain configuration depends on the HT settings chosen above */
	iwlagn_set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}
683*4882a593Smuzhiyun
iwl_set_rxon_ht(struct iwl_priv * priv,struct iwl_ht_config * ht_conf)684*4882a593Smuzhiyun void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
685*4882a593Smuzhiyun {
686*4882a593Smuzhiyun struct iwl_rxon_context *ctx;
687*4882a593Smuzhiyun
688*4882a593Smuzhiyun for_each_context(priv, ctx)
689*4882a593Smuzhiyun _iwl_set_rxon_ht(priv, ht_conf, ctx);
690*4882a593Smuzhiyun }
691*4882a593Smuzhiyun
692*4882a593Smuzhiyun /*
693*4882a593Smuzhiyun * iwl_set_rxon_channel - Set the band and channel values in staging RXON
694*4882a593Smuzhiyun * @ch: requested channel as a pointer to struct ieee80211_channel
695*4882a593Smuzhiyun
696*4882a593Smuzhiyun * NOTE: Does not commit to the hardware; it sets appropriate bit fields
697*4882a593Smuzhiyun * in the staging RXON flag structure based on the ch->band
698*4882a593Smuzhiyun */
iwl_set_rxon_channel(struct iwl_priv * priv,struct ieee80211_channel * ch,struct iwl_rxon_context * ctx)699*4882a593Smuzhiyun void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
700*4882a593Smuzhiyun struct iwl_rxon_context *ctx)
701*4882a593Smuzhiyun {
702*4882a593Smuzhiyun enum nl80211_band band = ch->band;
703*4882a593Smuzhiyun u16 channel = ch->hw_value;
704*4882a593Smuzhiyun
705*4882a593Smuzhiyun if ((le16_to_cpu(ctx->staging.channel) == channel) &&
706*4882a593Smuzhiyun (priv->band == band))
707*4882a593Smuzhiyun return;
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun ctx->staging.channel = cpu_to_le16(channel);
710*4882a593Smuzhiyun if (band == NL80211_BAND_5GHZ)
711*4882a593Smuzhiyun ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
712*4882a593Smuzhiyun else
713*4882a593Smuzhiyun ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
714*4882a593Smuzhiyun
715*4882a593Smuzhiyun priv->band = band;
716*4882a593Smuzhiyun
717*4882a593Smuzhiyun IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
718*4882a593Smuzhiyun
719*4882a593Smuzhiyun }
720*4882a593Smuzhiyun
iwl_set_flags_for_band(struct iwl_priv * priv,struct iwl_rxon_context * ctx,enum nl80211_band band,struct ieee80211_vif * vif)721*4882a593Smuzhiyun void iwl_set_flags_for_band(struct iwl_priv *priv,
722*4882a593Smuzhiyun struct iwl_rxon_context *ctx,
723*4882a593Smuzhiyun enum nl80211_band band,
724*4882a593Smuzhiyun struct ieee80211_vif *vif)
725*4882a593Smuzhiyun {
726*4882a593Smuzhiyun if (band == NL80211_BAND_5GHZ) {
727*4882a593Smuzhiyun ctx->staging.flags &=
728*4882a593Smuzhiyun ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
729*4882a593Smuzhiyun | RXON_FLG_CCK_MSK);
730*4882a593Smuzhiyun ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
731*4882a593Smuzhiyun } else {
732*4882a593Smuzhiyun /* Copied from iwl_post_associate() */
733*4882a593Smuzhiyun if (vif && vif->bss_conf.use_short_slot)
734*4882a593Smuzhiyun ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
735*4882a593Smuzhiyun else
736*4882a593Smuzhiyun ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
737*4882a593Smuzhiyun
738*4882a593Smuzhiyun ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
739*4882a593Smuzhiyun ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
740*4882a593Smuzhiyun ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
741*4882a593Smuzhiyun }
742*4882a593Smuzhiyun }
743*4882a593Smuzhiyun
iwl_set_rxon_hwcrypto(struct iwl_priv * priv,struct iwl_rxon_context * ctx,int hw_decrypt)744*4882a593Smuzhiyun static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
745*4882a593Smuzhiyun struct iwl_rxon_context *ctx, int hw_decrypt)
746*4882a593Smuzhiyun {
747*4882a593Smuzhiyun struct iwl_rxon_cmd *rxon = &ctx->staging;
748*4882a593Smuzhiyun
749*4882a593Smuzhiyun if (hw_decrypt)
750*4882a593Smuzhiyun rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
751*4882a593Smuzhiyun else
752*4882a593Smuzhiyun rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
753*4882a593Smuzhiyun
754*4882a593Smuzhiyun }
755*4882a593Smuzhiyun
756*4882a593Smuzhiyun /* validate RXON structure is valid */
iwl_check_rxon_cmd(struct iwl_priv * priv,struct iwl_rxon_context * ctx)757*4882a593Smuzhiyun static int iwl_check_rxon_cmd(struct iwl_priv *priv,
758*4882a593Smuzhiyun struct iwl_rxon_context *ctx)
759*4882a593Smuzhiyun {
760*4882a593Smuzhiyun struct iwl_rxon_cmd *rxon = &ctx->staging;
761*4882a593Smuzhiyun u32 errors = 0;
762*4882a593Smuzhiyun
763*4882a593Smuzhiyun if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
764*4882a593Smuzhiyun if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
765*4882a593Smuzhiyun IWL_WARN(priv, "check 2.4G: wrong narrow\n");
766*4882a593Smuzhiyun errors |= BIT(0);
767*4882a593Smuzhiyun }
768*4882a593Smuzhiyun if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
769*4882a593Smuzhiyun IWL_WARN(priv, "check 2.4G: wrong radar\n");
770*4882a593Smuzhiyun errors |= BIT(1);
771*4882a593Smuzhiyun }
772*4882a593Smuzhiyun } else {
773*4882a593Smuzhiyun if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
774*4882a593Smuzhiyun IWL_WARN(priv, "check 5.2G: not short slot!\n");
775*4882a593Smuzhiyun errors |= BIT(2);
776*4882a593Smuzhiyun }
777*4882a593Smuzhiyun if (rxon->flags & RXON_FLG_CCK_MSK) {
778*4882a593Smuzhiyun IWL_WARN(priv, "check 5.2G: CCK!\n");
779*4882a593Smuzhiyun errors |= BIT(3);
780*4882a593Smuzhiyun }
781*4882a593Smuzhiyun }
782*4882a593Smuzhiyun if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
783*4882a593Smuzhiyun IWL_WARN(priv, "mac/bssid mcast!\n");
784*4882a593Smuzhiyun errors |= BIT(4);
785*4882a593Smuzhiyun }
786*4882a593Smuzhiyun
787*4882a593Smuzhiyun /* make sure basic rates 6Mbps and 1Mbps are supported */
788*4882a593Smuzhiyun if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
789*4882a593Smuzhiyun (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
790*4882a593Smuzhiyun IWL_WARN(priv, "neither 1 nor 6 are basic\n");
791*4882a593Smuzhiyun errors |= BIT(5);
792*4882a593Smuzhiyun }
793*4882a593Smuzhiyun
794*4882a593Smuzhiyun if (le16_to_cpu(rxon->assoc_id) > 2007) {
795*4882a593Smuzhiyun IWL_WARN(priv, "aid > 2007\n");
796*4882a593Smuzhiyun errors |= BIT(6);
797*4882a593Smuzhiyun }
798*4882a593Smuzhiyun
799*4882a593Smuzhiyun if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
800*4882a593Smuzhiyun == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
801*4882a593Smuzhiyun IWL_WARN(priv, "CCK and short slot\n");
802*4882a593Smuzhiyun errors |= BIT(7);
803*4882a593Smuzhiyun }
804*4882a593Smuzhiyun
805*4882a593Smuzhiyun if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
806*4882a593Smuzhiyun == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
807*4882a593Smuzhiyun IWL_WARN(priv, "CCK and auto detect\n");
808*4882a593Smuzhiyun errors |= BIT(8);
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun
811*4882a593Smuzhiyun if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
812*4882a593Smuzhiyun RXON_FLG_TGG_PROTECT_MSK)) ==
813*4882a593Smuzhiyun RXON_FLG_TGG_PROTECT_MSK) {
814*4882a593Smuzhiyun IWL_WARN(priv, "TGg but no auto-detect\n");
815*4882a593Smuzhiyun errors |= BIT(9);
816*4882a593Smuzhiyun }
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun if (rxon->channel == 0) {
819*4882a593Smuzhiyun IWL_WARN(priv, "zero channel is invalid\n");
820*4882a593Smuzhiyun errors |= BIT(10);
821*4882a593Smuzhiyun }
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun WARN(errors, "Invalid RXON (%#x), channel %d",
824*4882a593Smuzhiyun errors, le16_to_cpu(rxon->channel));
825*4882a593Smuzhiyun
826*4882a593Smuzhiyun return errors ? -EINVAL : 0;
827*4882a593Smuzhiyun }
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun /*
830*4882a593Smuzhiyun * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
831*4882a593Smuzhiyun * @priv: staging_rxon is compared to active_rxon
832*4882a593Smuzhiyun *
833*4882a593Smuzhiyun * If the RXON structure is changing enough to require a new tune,
834*4882a593Smuzhiyun * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
835*4882a593Smuzhiyun * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
836*4882a593Smuzhiyun */
iwl_full_rxon_required(struct iwl_priv * priv,struct iwl_rxon_context * ctx)837*4882a593Smuzhiyun static int iwl_full_rxon_required(struct iwl_priv *priv,
838*4882a593Smuzhiyun struct iwl_rxon_context *ctx)
839*4882a593Smuzhiyun {
840*4882a593Smuzhiyun const struct iwl_rxon_cmd *staging = &ctx->staging;
841*4882a593Smuzhiyun const struct iwl_rxon_cmd *active = &ctx->active;
842*4882a593Smuzhiyun
843*4882a593Smuzhiyun #define CHK(cond) \
844*4882a593Smuzhiyun if ((cond)) { \
845*4882a593Smuzhiyun IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
846*4882a593Smuzhiyun return 1; \
847*4882a593Smuzhiyun }
848*4882a593Smuzhiyun
849*4882a593Smuzhiyun #define CHK_NEQ(c1, c2) \
850*4882a593Smuzhiyun if ((c1) != (c2)) { \
851*4882a593Smuzhiyun IWL_DEBUG_INFO(priv, "need full RXON - " \
852*4882a593Smuzhiyun #c1 " != " #c2 " - %d != %d\n", \
853*4882a593Smuzhiyun (c1), (c2)); \
854*4882a593Smuzhiyun return 1; \
855*4882a593Smuzhiyun }
856*4882a593Smuzhiyun
857*4882a593Smuzhiyun /* These items are only settable from the full RXON command */
858*4882a593Smuzhiyun CHK(!iwl_is_associated_ctx(ctx));
859*4882a593Smuzhiyun CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
860*4882a593Smuzhiyun CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
861*4882a593Smuzhiyun CHK(!ether_addr_equal(staging->wlap_bssid_addr,
862*4882a593Smuzhiyun active->wlap_bssid_addr));
863*4882a593Smuzhiyun CHK_NEQ(staging->dev_type, active->dev_type);
864*4882a593Smuzhiyun CHK_NEQ(staging->channel, active->channel);
865*4882a593Smuzhiyun CHK_NEQ(staging->air_propagation, active->air_propagation);
866*4882a593Smuzhiyun CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
867*4882a593Smuzhiyun active->ofdm_ht_single_stream_basic_rates);
868*4882a593Smuzhiyun CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
869*4882a593Smuzhiyun active->ofdm_ht_dual_stream_basic_rates);
870*4882a593Smuzhiyun CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
871*4882a593Smuzhiyun active->ofdm_ht_triple_stream_basic_rates);
872*4882a593Smuzhiyun CHK_NEQ(staging->assoc_id, active->assoc_id);
873*4882a593Smuzhiyun
874*4882a593Smuzhiyun /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
875*4882a593Smuzhiyun * be updated with the RXON_ASSOC command -- however only some
876*4882a593Smuzhiyun * flag transitions are allowed using RXON_ASSOC */
877*4882a593Smuzhiyun
878*4882a593Smuzhiyun /* Check if we are not switching bands */
879*4882a593Smuzhiyun CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
880*4882a593Smuzhiyun active->flags & RXON_FLG_BAND_24G_MSK);
881*4882a593Smuzhiyun
882*4882a593Smuzhiyun /* Check if we are switching association toggle */
883*4882a593Smuzhiyun CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
884*4882a593Smuzhiyun active->filter_flags & RXON_FILTER_ASSOC_MSK);
885*4882a593Smuzhiyun
886*4882a593Smuzhiyun #undef CHK
887*4882a593Smuzhiyun #undef CHK_NEQ
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun return 0;
890*4882a593Smuzhiyun }
891*4882a593Smuzhiyun
892*4882a593Smuzhiyun #ifdef CONFIG_IWLWIFI_DEBUG
iwl_print_rx_config_cmd(struct iwl_priv * priv,enum iwl_rxon_context_id ctxid)893*4882a593Smuzhiyun void iwl_print_rx_config_cmd(struct iwl_priv *priv,
894*4882a593Smuzhiyun enum iwl_rxon_context_id ctxid)
895*4882a593Smuzhiyun {
896*4882a593Smuzhiyun struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
897*4882a593Smuzhiyun struct iwl_rxon_cmd *rxon = &ctx->staging;
898*4882a593Smuzhiyun
899*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
900*4882a593Smuzhiyun iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
901*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
902*4882a593Smuzhiyun le16_to_cpu(rxon->channel));
903*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
904*4882a593Smuzhiyun le32_to_cpu(rxon->flags));
905*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
906*4882a593Smuzhiyun le32_to_cpu(rxon->filter_flags));
907*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
908*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
909*4882a593Smuzhiyun rxon->ofdm_basic_rates);
910*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
911*4882a593Smuzhiyun rxon->cck_basic_rates);
912*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
913*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
914*4882a593Smuzhiyun IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
915*4882a593Smuzhiyun le16_to_cpu(rxon->assoc_id));
916*4882a593Smuzhiyun }
917*4882a593Smuzhiyun #endif
918*4882a593Smuzhiyun
iwl_calc_basic_rates(struct iwl_priv * priv,struct iwl_rxon_context * ctx)919*4882a593Smuzhiyun static void iwl_calc_basic_rates(struct iwl_priv *priv,
920*4882a593Smuzhiyun struct iwl_rxon_context *ctx)
921*4882a593Smuzhiyun {
922*4882a593Smuzhiyun int lowest_present_ofdm = 100;
923*4882a593Smuzhiyun int lowest_present_cck = 100;
924*4882a593Smuzhiyun u8 cck = 0;
925*4882a593Smuzhiyun u8 ofdm = 0;
926*4882a593Smuzhiyun
927*4882a593Smuzhiyun if (ctx->vif) {
928*4882a593Smuzhiyun struct ieee80211_supported_band *sband;
929*4882a593Smuzhiyun unsigned long basic = ctx->vif->bss_conf.basic_rates;
930*4882a593Smuzhiyun int i;
931*4882a593Smuzhiyun
932*4882a593Smuzhiyun sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];
933*4882a593Smuzhiyun
934*4882a593Smuzhiyun for_each_set_bit(i, &basic, BITS_PER_LONG) {
935*4882a593Smuzhiyun int hw = sband->bitrates[i].hw_value;
936*4882a593Smuzhiyun if (hw >= IWL_FIRST_OFDM_RATE) {
937*4882a593Smuzhiyun ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
938*4882a593Smuzhiyun if (lowest_present_ofdm > hw)
939*4882a593Smuzhiyun lowest_present_ofdm = hw;
940*4882a593Smuzhiyun } else {
941*4882a593Smuzhiyun BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
942*4882a593Smuzhiyun
943*4882a593Smuzhiyun cck |= BIT(hw);
944*4882a593Smuzhiyun if (lowest_present_cck > hw)
945*4882a593Smuzhiyun lowest_present_cck = hw;
946*4882a593Smuzhiyun }
947*4882a593Smuzhiyun }
948*4882a593Smuzhiyun }
949*4882a593Smuzhiyun
950*4882a593Smuzhiyun /*
951*4882a593Smuzhiyun * Now we've got the basic rates as bitmaps in the ofdm and cck
952*4882a593Smuzhiyun * variables. This isn't sufficient though, as there might not
953*4882a593Smuzhiyun * be all the right rates in the bitmap. E.g. if the only basic
954*4882a593Smuzhiyun * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
955*4882a593Smuzhiyun * and 6 Mbps because the 802.11-2007 standard says in 9.6:
956*4882a593Smuzhiyun *
957*4882a593Smuzhiyun * [...] a STA responding to a received frame shall transmit
958*4882a593Smuzhiyun * its Control Response frame [...] at the highest rate in the
959*4882a593Smuzhiyun * BSSBasicRateSet parameter that is less than or equal to the
960*4882a593Smuzhiyun * rate of the immediately previous frame in the frame exchange
961*4882a593Smuzhiyun * sequence ([...]) and that is of the same modulation class
962*4882a593Smuzhiyun * ([...]) as the received frame. If no rate contained in the
963*4882a593Smuzhiyun * BSSBasicRateSet parameter meets these conditions, then the
964*4882a593Smuzhiyun * control frame sent in response to a received frame shall be
965*4882a593Smuzhiyun * transmitted at the highest mandatory rate of the PHY that is
966*4882a593Smuzhiyun * less than or equal to the rate of the received frame, and
967*4882a593Smuzhiyun * that is of the same modulation class as the received frame.
968*4882a593Smuzhiyun *
969*4882a593Smuzhiyun * As a consequence, we need to add all mandatory rates that are
970*4882a593Smuzhiyun * lower than all of the basic rates to these bitmaps.
971*4882a593Smuzhiyun */
972*4882a593Smuzhiyun
973*4882a593Smuzhiyun if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
974*4882a593Smuzhiyun ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
975*4882a593Smuzhiyun if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
976*4882a593Smuzhiyun ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;
977*4882a593Smuzhiyun /* 6M already there or needed so always add */
978*4882a593Smuzhiyun ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;
979*4882a593Smuzhiyun
980*4882a593Smuzhiyun /*
981*4882a593Smuzhiyun * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
982*4882a593Smuzhiyun * Note, however:
983*4882a593Smuzhiyun * - if no CCK rates are basic, it must be ERP since there must
984*4882a593Smuzhiyun * be some basic rates at all, so they're OFDM => ERP PHY
985*4882a593Smuzhiyun * (or we're in 5 GHz, and the cck bitmap will never be used)
986*4882a593Smuzhiyun * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
987*4882a593Smuzhiyun * - if 5.5M is basic, 1M and 2M are mandatory
988*4882a593Smuzhiyun * - if 2M is basic, 1M is mandatory
989*4882a593Smuzhiyun * - if 1M is basic, that's the only valid ACK rate.
990*4882a593Smuzhiyun * As a consequence, it's not as complicated as it sounds, just add
991*4882a593Smuzhiyun * any lower rates to the ACK rate bitmap.
992*4882a593Smuzhiyun */
993*4882a593Smuzhiyun if (IWL_RATE_11M_INDEX < lowest_present_cck)
994*4882a593Smuzhiyun cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
995*4882a593Smuzhiyun if (IWL_RATE_5M_INDEX < lowest_present_cck)
996*4882a593Smuzhiyun cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
997*4882a593Smuzhiyun if (IWL_RATE_2M_INDEX < lowest_present_cck)
998*4882a593Smuzhiyun cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
999*4882a593Smuzhiyun /* 1M already there or needed so always add */
1000*4882a593Smuzhiyun cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
1001*4882a593Smuzhiyun
1002*4882a593Smuzhiyun IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
1003*4882a593Smuzhiyun cck, ofdm);
1004*4882a593Smuzhiyun
1005*4882a593Smuzhiyun /* "basic_rates" is a misnomer here -- should be called ACK rates */
1006*4882a593Smuzhiyun ctx->staging.cck_basic_rates = cck;
1007*4882a593Smuzhiyun ctx->staging.ofdm_basic_rates = ofdm;
1008*4882a593Smuzhiyun }
1009*4882a593Smuzhiyun
1010*4882a593Smuzhiyun /*
1011*4882a593Smuzhiyun * iwlagn_commit_rxon - commit staging_rxon to hardware
1012*4882a593Smuzhiyun *
1013*4882a593Smuzhiyun * The RXON command in staging_rxon is committed to the hardware and
1014*4882a593Smuzhiyun * the active_rxon structure is updated with the new data. This
1015*4882a593Smuzhiyun * function correctly transitions out of the RXON_ASSOC_MSK state if
1016*4882a593Smuzhiyun * a HW tune is required based on the RXON structure changes.
1017*4882a593Smuzhiyun *
1018*4882a593Smuzhiyun * The connect/disconnect flow should be as the following:
1019*4882a593Smuzhiyun *
1020*4882a593Smuzhiyun * 1. make sure send RXON command with association bit unset if not connect
1021*4882a593Smuzhiyun * this should include the channel and the band for the candidate
1022*4882a593Smuzhiyun * to be connected to
1023*4882a593Smuzhiyun * 2. Add Station before RXON association with the AP
1024*4882a593Smuzhiyun * 3. RXON_timing has to send before RXON for connection
1025*4882a593Smuzhiyun * 4. full RXON command - associated bit set
1026*4882a593Smuzhiyun * 5. use RXON_ASSOC command to update any flags changes
1027*4882a593Smuzhiyun */
iwlagn_commit_rxon(struct iwl_priv * priv,struct iwl_rxon_context * ctx)1028*4882a593Smuzhiyun int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1029*4882a593Smuzhiyun {
1030*4882a593Smuzhiyun /* cast away the const for active_rxon in this function */
1031*4882a593Smuzhiyun struct iwl_rxon_cmd *active = (void *)&ctx->active;
1032*4882a593Smuzhiyun bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1033*4882a593Smuzhiyun int ret;
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun lockdep_assert_held(&priv->mutex);
1036*4882a593Smuzhiyun
1037*4882a593Smuzhiyun if (!iwl_is_alive(priv))
1038*4882a593Smuzhiyun return -EBUSY;
1039*4882a593Smuzhiyun
1040*4882a593Smuzhiyun /* This function hardcodes a bunch of dual-mode assumptions */
1041*4882a593Smuzhiyun BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
1042*4882a593Smuzhiyun
1043*4882a593Smuzhiyun if (!ctx->is_active)
1044*4882a593Smuzhiyun return 0;
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun /* always get timestamp with Rx frame */
1047*4882a593Smuzhiyun ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1048*4882a593Smuzhiyun
1049*4882a593Smuzhiyun /* recalculate basic rates */
1050*4882a593Smuzhiyun iwl_calc_basic_rates(priv, ctx);
1051*4882a593Smuzhiyun
1052*4882a593Smuzhiyun /*
1053*4882a593Smuzhiyun * force CTS-to-self frames protection if RTS-CTS is not preferred
1054*4882a593Smuzhiyun * one aggregation protection method
1055*4882a593Smuzhiyun */
1056*4882a593Smuzhiyun if (!priv->hw_params.use_rts_for_aggregation)
1057*4882a593Smuzhiyun ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
1060*4882a593Smuzhiyun !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
1061*4882a593Smuzhiyun ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
1062*4882a593Smuzhiyun else
1063*4882a593Smuzhiyun ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyun iwl_print_rx_config_cmd(priv, ctx->ctxid);
1066*4882a593Smuzhiyun ret = iwl_check_rxon_cmd(priv, ctx);
1067*4882a593Smuzhiyun if (ret) {
1068*4882a593Smuzhiyun IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1069*4882a593Smuzhiyun return -EINVAL;
1070*4882a593Smuzhiyun }
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun /*
1073*4882a593Smuzhiyun * receive commit_rxon request
1074*4882a593Smuzhiyun * abort any previous channel switch if still in process
1075*4882a593Smuzhiyun */
1076*4882a593Smuzhiyun if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
1077*4882a593Smuzhiyun (priv->switch_channel != ctx->staging.channel)) {
1078*4882a593Smuzhiyun IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1079*4882a593Smuzhiyun le16_to_cpu(priv->switch_channel));
1080*4882a593Smuzhiyun iwl_chswitch_done(priv, false);
1081*4882a593Smuzhiyun }
1082*4882a593Smuzhiyun
1083*4882a593Smuzhiyun /*
1084*4882a593Smuzhiyun * If we don't need to send a full RXON, we can use
1085*4882a593Smuzhiyun * iwl_rxon_assoc_cmd which is used to reconfigure filter
1086*4882a593Smuzhiyun * and other flags for the current radio configuration.
1087*4882a593Smuzhiyun */
1088*4882a593Smuzhiyun if (!iwl_full_rxon_required(priv, ctx)) {
1089*4882a593Smuzhiyun ret = iwlagn_send_rxon_assoc(priv, ctx);
1090*4882a593Smuzhiyun if (ret) {
1091*4882a593Smuzhiyun IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1092*4882a593Smuzhiyun return ret;
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun memcpy(active, &ctx->staging, sizeof(*active));
1096*4882a593Smuzhiyun /*
1097*4882a593Smuzhiyun * We do not commit tx power settings while channel changing,
1098*4882a593Smuzhiyun * do it now if after settings changed.
1099*4882a593Smuzhiyun */
1100*4882a593Smuzhiyun iwl_set_tx_power(priv, priv->tx_power_next, false);
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun /* make sure we are in the right PS state */
1103*4882a593Smuzhiyun iwl_power_update_mode(priv, true);
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun return 0;
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun
1108*4882a593Smuzhiyun iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.swcrypto);
1109*4882a593Smuzhiyun
1110*4882a593Smuzhiyun IWL_DEBUG_INFO(priv,
1111*4882a593Smuzhiyun "Going to commit RXON\n"
1112*4882a593Smuzhiyun " * with%s RXON_FILTER_ASSOC_MSK\n"
1113*4882a593Smuzhiyun " * channel = %d\n"
1114*4882a593Smuzhiyun " * bssid = %pM\n",
1115*4882a593Smuzhiyun (new_assoc ? "" : "out"),
1116*4882a593Smuzhiyun le16_to_cpu(ctx->staging.channel),
1117*4882a593Smuzhiyun ctx->staging.bssid_addr);
1118*4882a593Smuzhiyun
1119*4882a593Smuzhiyun /*
1120*4882a593Smuzhiyun * Always clear associated first, but with the correct config.
1121*4882a593Smuzhiyun * This is required as for example station addition for the
1122*4882a593Smuzhiyun * AP station must be done after the BSSID is set to correctly
1123*4882a593Smuzhiyun * set up filters in the device.
1124*4882a593Smuzhiyun */
1125*4882a593Smuzhiyun ret = iwlagn_rxon_disconn(priv, ctx);
1126*4882a593Smuzhiyun if (ret)
1127*4882a593Smuzhiyun return ret;
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun ret = iwlagn_set_pan_params(priv);
1130*4882a593Smuzhiyun if (ret)
1131*4882a593Smuzhiyun return ret;
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun if (new_assoc)
1134*4882a593Smuzhiyun return iwlagn_rxon_connect(priv, ctx);
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun return 0;
1137*4882a593Smuzhiyun }
1138*4882a593Smuzhiyun
iwlagn_config_ht40(struct ieee80211_conf * conf,struct iwl_rxon_context * ctx)1139*4882a593Smuzhiyun void iwlagn_config_ht40(struct ieee80211_conf *conf,
1140*4882a593Smuzhiyun struct iwl_rxon_context *ctx)
1141*4882a593Smuzhiyun {
1142*4882a593Smuzhiyun if (conf_is_ht40_minus(conf)) {
1143*4882a593Smuzhiyun ctx->ht.extension_chan_offset =
1144*4882a593Smuzhiyun IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1145*4882a593Smuzhiyun ctx->ht.is_40mhz = true;
1146*4882a593Smuzhiyun } else if (conf_is_ht40_plus(conf)) {
1147*4882a593Smuzhiyun ctx->ht.extension_chan_offset =
1148*4882a593Smuzhiyun IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1149*4882a593Smuzhiyun ctx->ht.is_40mhz = true;
1150*4882a593Smuzhiyun } else {
1151*4882a593Smuzhiyun ctx->ht.extension_chan_offset =
1152*4882a593Smuzhiyun IEEE80211_HT_PARAM_CHA_SEC_NONE;
1153*4882a593Smuzhiyun ctx->ht.is_40mhz = false;
1154*4882a593Smuzhiyun }
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun
iwlagn_mac_config(struct ieee80211_hw * hw,u32 changed)1157*4882a593Smuzhiyun int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1158*4882a593Smuzhiyun {
1159*4882a593Smuzhiyun struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1160*4882a593Smuzhiyun struct iwl_rxon_context *ctx;
1161*4882a593Smuzhiyun struct ieee80211_conf *conf = &hw->conf;
1162*4882a593Smuzhiyun struct ieee80211_channel *channel = conf->chandef.chan;
1163*4882a593Smuzhiyun int ret = 0;
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
1166*4882a593Smuzhiyun
1167*4882a593Smuzhiyun mutex_lock(&priv->mutex);
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
1170*4882a593Smuzhiyun IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
1171*4882a593Smuzhiyun goto out;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun if (!iwl_is_ready(priv)) {
1175*4882a593Smuzhiyun IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
1176*4882a593Smuzhiyun goto out;
1177*4882a593Smuzhiyun }
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun if (changed & (IEEE80211_CONF_CHANGE_SMPS |
1180*4882a593Smuzhiyun IEEE80211_CONF_CHANGE_CHANNEL)) {
1181*4882a593Smuzhiyun /* mac80211 uses static for non-HT which is what we want */
1182*4882a593Smuzhiyun priv->current_ht_config.smps = conf->smps_mode;
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun /*
1185*4882a593Smuzhiyun * Recalculate chain counts.
1186*4882a593Smuzhiyun *
1187*4882a593Smuzhiyun * If monitor mode is enabled then mac80211 will
1188*4882a593Smuzhiyun * set up the SM PS mode to OFF if an HT channel is
1189*4882a593Smuzhiyun * configured.
1190*4882a593Smuzhiyun */
1191*4882a593Smuzhiyun for_each_context(priv, ctx)
1192*4882a593Smuzhiyun iwlagn_set_rxon_chain(priv, ctx);
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1196*4882a593Smuzhiyun for_each_context(priv, ctx) {
1197*4882a593Smuzhiyun /* Configure HT40 channels */
1198*4882a593Smuzhiyun if (ctx->ht.enabled != conf_is_ht(conf))
1199*4882a593Smuzhiyun ctx->ht.enabled = conf_is_ht(conf);
1200*4882a593Smuzhiyun
1201*4882a593Smuzhiyun if (ctx->ht.enabled) {
1202*4882a593Smuzhiyun /* if HT40 is used, it should not change
1203*4882a593Smuzhiyun * after associated except channel switch */
1204*4882a593Smuzhiyun if (!ctx->ht.is_40mhz ||
1205*4882a593Smuzhiyun !iwl_is_associated_ctx(ctx))
1206*4882a593Smuzhiyun iwlagn_config_ht40(conf, ctx);
1207*4882a593Smuzhiyun } else
1208*4882a593Smuzhiyun ctx->ht.is_40mhz = false;
1209*4882a593Smuzhiyun
1210*4882a593Smuzhiyun /*
1211*4882a593Smuzhiyun * Default to no protection. Protection mode will
1212*4882a593Smuzhiyun * later be set from BSS config in iwl_ht_conf
1213*4882a593Smuzhiyun */
1214*4882a593Smuzhiyun ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun /* if we are switching from ht to 2.4 clear flags
1217*4882a593Smuzhiyun * from any ht related info since 2.4 does not
1218*4882a593Smuzhiyun * support ht */
1219*4882a593Smuzhiyun if (le16_to_cpu(ctx->staging.channel) !=
1220*4882a593Smuzhiyun channel->hw_value)
1221*4882a593Smuzhiyun ctx->staging.flags = 0;
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun iwl_set_rxon_channel(priv, channel, ctx);
1224*4882a593Smuzhiyun iwl_set_rxon_ht(priv, &priv->current_ht_config);
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun iwl_set_flags_for_band(priv, ctx, channel->band,
1227*4882a593Smuzhiyun ctx->vif);
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun iwl_update_bcast_stations(priv);
1231*4882a593Smuzhiyun }
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun if (changed & (IEEE80211_CONF_CHANGE_PS |
1234*4882a593Smuzhiyun IEEE80211_CONF_CHANGE_IDLE)) {
1235*4882a593Smuzhiyun ret = iwl_power_update_mode(priv, false);
1236*4882a593Smuzhiyun if (ret)
1237*4882a593Smuzhiyun IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun if (changed & IEEE80211_CONF_CHANGE_POWER) {
1241*4882a593Smuzhiyun IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
1242*4882a593Smuzhiyun priv->tx_power_user_lmt, conf->power_level);
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun iwl_set_tx_power(priv, conf->power_level, false);
1245*4882a593Smuzhiyun }
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun for_each_context(priv, ctx) {
1248*4882a593Smuzhiyun if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1249*4882a593Smuzhiyun continue;
1250*4882a593Smuzhiyun iwlagn_commit_rxon(priv, ctx);
1251*4882a593Smuzhiyun }
1252*4882a593Smuzhiyun out:
1253*4882a593Smuzhiyun mutex_unlock(&priv->mutex);
1254*4882a593Smuzhiyun IWL_DEBUG_MAC80211(priv, "leave\n");
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun return ret;
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun
iwlagn_check_needed_chains(struct iwl_priv * priv,struct iwl_rxon_context * ctx,struct ieee80211_bss_conf * bss_conf)1259*4882a593Smuzhiyun static void iwlagn_check_needed_chains(struct iwl_priv *priv,
1260*4882a593Smuzhiyun struct iwl_rxon_context *ctx,
1261*4882a593Smuzhiyun struct ieee80211_bss_conf *bss_conf)
1262*4882a593Smuzhiyun {
1263*4882a593Smuzhiyun struct ieee80211_vif *vif = ctx->vif;
1264*4882a593Smuzhiyun struct iwl_rxon_context *tmp;
1265*4882a593Smuzhiyun struct ieee80211_sta *sta;
1266*4882a593Smuzhiyun struct iwl_ht_config *ht_conf = &priv->current_ht_config;
1267*4882a593Smuzhiyun struct ieee80211_sta_ht_cap *ht_cap;
1268*4882a593Smuzhiyun bool need_multiple;
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun lockdep_assert_held(&priv->mutex);
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun switch (vif->type) {
1273*4882a593Smuzhiyun case NL80211_IFTYPE_STATION:
1274*4882a593Smuzhiyun rcu_read_lock();
1275*4882a593Smuzhiyun sta = ieee80211_find_sta(vif, bss_conf->bssid);
1276*4882a593Smuzhiyun if (!sta) {
1277*4882a593Smuzhiyun /*
1278*4882a593Smuzhiyun * If at all, this can only happen through a race
1279*4882a593Smuzhiyun * when the AP disconnects us while we're still
1280*4882a593Smuzhiyun * setting up the connection, in that case mac80211
1281*4882a593Smuzhiyun * will soon tell us about that.
1282*4882a593Smuzhiyun */
1283*4882a593Smuzhiyun need_multiple = false;
1284*4882a593Smuzhiyun rcu_read_unlock();
1285*4882a593Smuzhiyun break;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun ht_cap = &sta->ht_cap;
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun need_multiple = true;
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun /*
1293*4882a593Smuzhiyun * If the peer advertises no support for receiving 2 and 3
1294*4882a593Smuzhiyun * stream MCS rates, it can't be transmitting them either.
1295*4882a593Smuzhiyun */
1296*4882a593Smuzhiyun if (ht_cap->mcs.rx_mask[1] == 0 &&
1297*4882a593Smuzhiyun ht_cap->mcs.rx_mask[2] == 0) {
1298*4882a593Smuzhiyun need_multiple = false;
1299*4882a593Smuzhiyun } else if (!(ht_cap->mcs.tx_params &
1300*4882a593Smuzhiyun IEEE80211_HT_MCS_TX_DEFINED)) {
1301*4882a593Smuzhiyun /* If it can't TX MCS at all ... */
1302*4882a593Smuzhiyun need_multiple = false;
1303*4882a593Smuzhiyun } else if (ht_cap->mcs.tx_params &
1304*4882a593Smuzhiyun IEEE80211_HT_MCS_TX_RX_DIFF) {
1305*4882a593Smuzhiyun int maxstreams;
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun /*
1308*4882a593Smuzhiyun * But if it can receive them, it might still not
1309*4882a593Smuzhiyun * be able to transmit them, which is what we need
1310*4882a593Smuzhiyun * to check here -- so check the number of streams
1311*4882a593Smuzhiyun * it advertises for TX (if different from RX).
1312*4882a593Smuzhiyun */
1313*4882a593Smuzhiyun
1314*4882a593Smuzhiyun maxstreams = (ht_cap->mcs.tx_params &
1315*4882a593Smuzhiyun IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
1316*4882a593Smuzhiyun maxstreams >>=
1317*4882a593Smuzhiyun IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1318*4882a593Smuzhiyun maxstreams += 1;
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun if (maxstreams <= 1)
1321*4882a593Smuzhiyun need_multiple = false;
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun rcu_read_unlock();
1325*4882a593Smuzhiyun break;
1326*4882a593Smuzhiyun case NL80211_IFTYPE_ADHOC:
1327*4882a593Smuzhiyun /* currently */
1328*4882a593Smuzhiyun need_multiple = false;
1329*4882a593Smuzhiyun break;
1330*4882a593Smuzhiyun default:
1331*4882a593Smuzhiyun /* only AP really */
1332*4882a593Smuzhiyun need_multiple = true;
1333*4882a593Smuzhiyun break;
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun
1336*4882a593Smuzhiyun ctx->ht_need_multiple_chains = need_multiple;
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun if (!need_multiple) {
1339*4882a593Smuzhiyun /* check all contexts */
1340*4882a593Smuzhiyun for_each_context(priv, tmp) {
1341*4882a593Smuzhiyun if (!tmp->vif)
1342*4882a593Smuzhiyun continue;
1343*4882a593Smuzhiyun if (tmp->ht_need_multiple_chains) {
1344*4882a593Smuzhiyun need_multiple = true;
1345*4882a593Smuzhiyun break;
1346*4882a593Smuzhiyun }
1347*4882a593Smuzhiyun }
1348*4882a593Smuzhiyun }
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun ht_conf->single_chain_sufficient = !need_multiple;
1351*4882a593Smuzhiyun }
1352*4882a593Smuzhiyun
iwlagn_chain_noise_reset(struct iwl_priv * priv)1353*4882a593Smuzhiyun static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
1354*4882a593Smuzhiyun {
1355*4882a593Smuzhiyun struct iwl_chain_noise_data *data = &priv->chain_noise_data;
1356*4882a593Smuzhiyun int ret;
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
1359*4882a593Smuzhiyun return;
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
1362*4882a593Smuzhiyun iwl_is_any_associated(priv)) {
1363*4882a593Smuzhiyun struct iwl_calib_chain_noise_reset_cmd cmd;
1364*4882a593Smuzhiyun
1365*4882a593Smuzhiyun /* clear data for chain noise calibration algorithm */
1366*4882a593Smuzhiyun data->chain_noise_a = 0;
1367*4882a593Smuzhiyun data->chain_noise_b = 0;
1368*4882a593Smuzhiyun data->chain_noise_c = 0;
1369*4882a593Smuzhiyun data->chain_signal_a = 0;
1370*4882a593Smuzhiyun data->chain_signal_b = 0;
1371*4882a593Smuzhiyun data->chain_signal_c = 0;
1372*4882a593Smuzhiyun data->beacon_count = 0;
1373*4882a593Smuzhiyun
1374*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1375*4882a593Smuzhiyun iwl_set_calib_hdr(&cmd.hdr,
1376*4882a593Smuzhiyun priv->phy_calib_chain_noise_reset_cmd);
1377*4882a593Smuzhiyun ret = iwl_dvm_send_cmd_pdu(priv,
1378*4882a593Smuzhiyun REPLY_PHY_CALIBRATION_CMD,
1379*4882a593Smuzhiyun 0, sizeof(cmd), &cmd);
1380*4882a593Smuzhiyun if (ret)
1381*4882a593Smuzhiyun IWL_ERR(priv,
1382*4882a593Smuzhiyun "Could not send REPLY_PHY_CALIBRATION_CMD\n");
1383*4882a593Smuzhiyun data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1384*4882a593Smuzhiyun IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
1385*4882a593Smuzhiyun }
1386*4882a593Smuzhiyun }
1387*4882a593Smuzhiyun
/*
 * iwlagn_bss_info_changed - mac80211 bss_info_changed callback
 *
 * Mirrors BSS configuration changes (association state, QoS, HT
 * operation mode, protection, beaconing, IBSS membership) into the
 * context's staging RXON, then commits the RXON to the device when
 * it differs from the active configuration — or unconditionally when
 * the beacon interval changed. Runs under priv->mutex throughout.
 */
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_bss_conf *bss_conf,
			     u32 changes)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int ret;
	bool force = false;

	mutex_lock(&priv->mutex);

	if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
		/*
		 * If we go idle, then clearly no "passive-no-rx"
		 * workaround is needed any more, this is a reset.
		 */
		iwlagn_lift_passive_no_rx(priv);
	}

	if (unlikely(!iwl_is_ready(priv))) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	if (unlikely(!ctx->vif)) {
		IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/*
	 * A beacon interval change forces an RXON commit below even if
	 * the staging area ends up matching the active configuration.
	 */
	if (changes & BSS_CHANGED_BEACON_INT)
		force = true;

	if (changes & BSS_CHANGED_QOS) {
		ctx->qos_data.qos_active = bss_conf->qos;
		iwlagn_update_qos(priv, ctx);
	}

	/* mirror AID and short-preamble setting into the staging RXON */
	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (changes & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->sync_tsf;
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;

			/*
			 * Rekey data only makes sense while associated
			 * on the BSS context; drop it on disassociation.
			 */
			if (ctx->ctxid == IWL_RXON_CTX_BSS)
				priv->have_rekey_data = false;
		}

		/* association state feeds BT coex RSSI monitoring */
		iwlagn_bt_coex_rssi_monitor(priv);
	}

	if (ctx->ht.enabled) {
		ctx->ht.protection = bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_PROTECTION;
		ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
					IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
		iwlagn_check_needed_chains(priv, ctx, bss_conf);
		iwl_set_rxon_ht(priv, &priv->current_ht_config);
	}

	iwlagn_set_rxon_chain(priv, ctx);

	/* 11g (TGG) protection is only meaningful outside the 5 GHz band */
	if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
		ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;

	if (bss_conf->use_cts_prot)
		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
	else
		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;

	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

	/* track which context (if any) currently owns the beacon */
	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		if (vif->bss_conf.enable_beacon) {
			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = ctx;
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
			priv->beacon_ctx = NULL;
		}
	}

	/*
	 * If the ucode decides to do beacon filtering before
	 * association, it will lose beacons that are needed
	 * before sending frames out on passive channels. This
	 * causes association failures on those channels. Enable
	 * receiving beacons in such cases.
	 */

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!bss_conf->assoc)
			ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
		else
			ctx->staging.filter_flags &=
				~RXON_FILTER_BCON_AWARE_MSK;
	}

	/* single commit point: push staging to the device if needed */
	if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
		iwlagn_commit_rxon(priv, ctx);

	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
		/*
		 * The chain noise calibration will enable PM upon
		 * completion. If calibration has already been run
		 * then we need to enable power management here.
		 */
		if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
			iwl_power_update_mode(priv, false);

		/* Enable RX differential gain and sensitivity calibrations */
		iwlagn_chain_noise_reset(priv);
		priv->start_calib = 1;
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = iwlagn_manage_ibss_station(priv, vif,
						 bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
		if (iwlagn_update_beacon(priv, vif))
			IWL_ERR(priv, "Error updating beacon\n");
	}

	mutex_unlock(&priv->mutex);
}
1531*4882a593Smuzhiyun
iwlagn_post_scan(struct iwl_priv * priv)1532*4882a593Smuzhiyun void iwlagn_post_scan(struct iwl_priv *priv)
1533*4882a593Smuzhiyun {
1534*4882a593Smuzhiyun struct iwl_rxon_context *ctx;
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun /*
1537*4882a593Smuzhiyun * We do not commit power settings while scan is pending,
1538*4882a593Smuzhiyun * do it now if the settings changed.
1539*4882a593Smuzhiyun */
1540*4882a593Smuzhiyun iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
1541*4882a593Smuzhiyun iwl_set_tx_power(priv, priv->tx_power_next, false);
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun /*
1544*4882a593Smuzhiyun * Since setting the RXON may have been deferred while
1545*4882a593Smuzhiyun * performing the scan, fire one off if needed
1546*4882a593Smuzhiyun */
1547*4882a593Smuzhiyun for_each_context(priv, ctx)
1548*4882a593Smuzhiyun if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1549*4882a593Smuzhiyun iwlagn_commit_rxon(priv, ctx);
1550*4882a593Smuzhiyun
1551*4882a593Smuzhiyun iwlagn_set_pan_params(priv);
1552*4882a593Smuzhiyun }
1553