1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /******************************************************************************
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Contact Information:
7*4882a593Smuzhiyun * Intel Linux Wireless <linuxwifi@intel.com>
8*4882a593Smuzhiyun * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun *****************************************************************************/
11*4882a593Smuzhiyun #include <linux/etherdevice.h>
12*4882a593Smuzhiyun #include <linux/kernel.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/sched.h>
15*4882a593Smuzhiyun #include <net/mac80211.h>
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include "iwl-io.h"
18*4882a593Smuzhiyun #include "iwl-agn-hw.h"
19*4882a593Smuzhiyun #include "iwl-trans.h"
20*4882a593Smuzhiyun #include "iwl-modparams.h"
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #include "dev.h"
23*4882a593Smuzhiyun #include "agn.h"
24*4882a593Smuzhiyun
/* Check whether @addr lies inside the uCode RTC data region. */
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
	if (addr < IWLAGN_RTC_DATA_LOWER_BOUND)
		return 0;
	return addr < IWLAGN_RTC_DATA_UPPER_BOUND;
}
30*4882a593Smuzhiyun
iwlagn_send_tx_power(struct iwl_priv * priv)31*4882a593Smuzhiyun int iwlagn_send_tx_power(struct iwl_priv *priv)
32*4882a593Smuzhiyun {
33*4882a593Smuzhiyun struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
34*4882a593Smuzhiyun u8 tx_ant_cfg_cmd;
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
37*4882a593Smuzhiyun "TX Power requested while scanning!\n"))
38*4882a593Smuzhiyun return -EAGAIN;
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun /* half dBm need to multiply */
41*4882a593Smuzhiyun tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun if (tx_power_cmd.global_lmt > priv->nvm_data->max_tx_pwr_half_dbm) {
44*4882a593Smuzhiyun /*
45*4882a593Smuzhiyun * For the newer devices which using enhanced/extend tx power
46*4882a593Smuzhiyun * table in EEPROM, the format is in half dBm. driver need to
47*4882a593Smuzhiyun * convert to dBm format before report to mac80211.
48*4882a593Smuzhiyun * By doing so, there is a possibility of 1/2 dBm resolution
49*4882a593Smuzhiyun * lost. driver will perform "round-up" operation before
50*4882a593Smuzhiyun * reporting, but it will cause 1/2 dBm tx power over the
51*4882a593Smuzhiyun * regulatory limit. Perform the checking here, if the
52*4882a593Smuzhiyun * "tx_power_user_lmt" is higher than EEPROM value (in
53*4882a593Smuzhiyun * half-dBm format), lower the tx power based on EEPROM
54*4882a593Smuzhiyun */
55*4882a593Smuzhiyun tx_power_cmd.global_lmt =
56*4882a593Smuzhiyun priv->nvm_data->max_tx_pwr_half_dbm;
57*4882a593Smuzhiyun }
58*4882a593Smuzhiyun tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
59*4882a593Smuzhiyun tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun if (IWL_UCODE_API(priv->fw->ucode_ver) == 1)
62*4882a593Smuzhiyun tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
63*4882a593Smuzhiyun else
64*4882a593Smuzhiyun tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0,
67*4882a593Smuzhiyun sizeof(tx_power_cmd), &tx_power_cmd);
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun
iwlagn_temperature(struct iwl_priv * priv)70*4882a593Smuzhiyun void iwlagn_temperature(struct iwl_priv *priv)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun lockdep_assert_held(&priv->statistics.lock);
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun /* store temperature from correct statistics (in Celsius) */
75*4882a593Smuzhiyun priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
76*4882a593Smuzhiyun iwl_tt_handler(priv);
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun
/*
 * Convert a hardware rate_n_flags value to a mac80211 rate index,
 * or -1 if no legacy rate matches.
 */
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
{
	int band_offset;
	int idx;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		return rate_n_flags & 0xff;

	/* Legacy rate format, search for match in table */
	band_offset = (band == NL80211_BAND_5GHZ) ? IWL_FIRST_OFDM_RATE : 0;
	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) {
		if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
			return idx - band_offset;
	}

	return -1;
}
99*4882a593Smuzhiyun
/* Add or remove the IBSS BSSID station for @vif depending on @add. */
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
			       struct ieee80211_vif *vif, bool add)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (!add)
		return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
					  vif->bss_conf.bssid);

	return iwlagn_add_bssid_station(priv, vif_priv->ctx,
					vif->bss_conf.bssid,
					&vif_priv->ibss_bssid_sta_id);
}
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun /*
114*4882a593Smuzhiyun * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
115*4882a593Smuzhiyun *
116*4882a593Smuzhiyun * pre-requirements:
117*4882a593Smuzhiyun * 1. acquire mutex before calling
118*4882a593Smuzhiyun * 2. make sure rf is on and not in exit state
119*4882a593Smuzhiyun */
iwlagn_txfifo_flush(struct iwl_priv * priv,u32 scd_q_msk)120*4882a593Smuzhiyun int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = {
123*4882a593Smuzhiyun .flush_control = cpu_to_le16(IWL_DROP_ALL),
124*4882a593Smuzhiyun };
125*4882a593Smuzhiyun struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = {
126*4882a593Smuzhiyun .flush_control = cpu_to_le16(IWL_DROP_ALL),
127*4882a593Smuzhiyun };
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
130*4882a593Smuzhiyun IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK;
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
133*4882a593Smuzhiyun queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK |
134*4882a593Smuzhiyun IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK |
135*4882a593Smuzhiyun IWL_PAN_SCD_MGMT_MSK |
136*4882a593Smuzhiyun IWL_PAN_SCD_MULTICAST_MSK;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun if (priv->nvm_data->sku_cap_11n_enable)
139*4882a593Smuzhiyun queue_control |= IWL_AGG_TX_QUEUE_MSK;
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun if (scd_q_msk)
142*4882a593Smuzhiyun queue_control = scd_q_msk;
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control);
145*4882a593Smuzhiyun flush_cmd_v3.queue_control = cpu_to_le32(queue_control);
146*4882a593Smuzhiyun flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control);
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun if (IWL_UCODE_API(priv->fw->ucode_ver) > 2)
149*4882a593Smuzhiyun return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
150*4882a593Smuzhiyun sizeof(flush_cmd_v3),
151*4882a593Smuzhiyun &flush_cmd_v3);
152*4882a593Smuzhiyun return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
153*4882a593Smuzhiyun sizeof(flush_cmd_v2), &flush_cmd_v2);
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun
iwlagn_dev_txfifo_flush(struct iwl_priv * priv)156*4882a593Smuzhiyun void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun mutex_lock(&priv->mutex);
159*4882a593Smuzhiyun ieee80211_stop_queues(priv->hw);
160*4882a593Smuzhiyun if (iwlagn_txfifo_flush(priv, 0)) {
161*4882a593Smuzhiyun IWL_ERR(priv, "flush request fail\n");
162*4882a593Smuzhiyun goto done;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
165*4882a593Smuzhiyun iwl_trans_wait_tx_queues_empty(priv->trans, 0xffffffff);
166*4882a593Smuzhiyun done:
167*4882a593Smuzhiyun ieee80211_wake_queues(priv->hw);
168*4882a593Smuzhiyun mutex_unlock(&priv->mutex);
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun /*
172*4882a593Smuzhiyun * BT coex
173*4882a593Smuzhiyun */
174*4882a593Smuzhiyun /* Notmal TDM */
175*4882a593Smuzhiyun static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
176*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
177*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
178*4882a593Smuzhiyun cpu_to_le32(0xaeaaaaaa),
179*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
180*4882a593Smuzhiyun cpu_to_le32(0xcc00ff28),
181*4882a593Smuzhiyun cpu_to_le32(0x0000aaaa),
182*4882a593Smuzhiyun cpu_to_le32(0xcc00aaaa),
183*4882a593Smuzhiyun cpu_to_le32(0x0000aaaa),
184*4882a593Smuzhiyun cpu_to_le32(0xc0004000),
185*4882a593Smuzhiyun cpu_to_le32(0x00004000),
186*4882a593Smuzhiyun cpu_to_le32(0xf0005000),
187*4882a593Smuzhiyun cpu_to_le32(0xf0005000),
188*4882a593Smuzhiyun };
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun /* Full concurrency */
191*4882a593Smuzhiyun static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
192*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
193*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
194*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
195*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
196*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
197*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
198*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
199*4882a593Smuzhiyun cpu_to_le32(0xaaaaaaaa),
200*4882a593Smuzhiyun cpu_to_le32(0x00000000),
201*4882a593Smuzhiyun cpu_to_le32(0x00000000),
202*4882a593Smuzhiyun cpu_to_le32(0x00000000),
203*4882a593Smuzhiyun cpu_to_le32(0x00000000),
204*4882a593Smuzhiyun };
205*4882a593Smuzhiyun
/*
 * Build and send the advanced BT coexistence configuration command.
 *
 * Assembles the common ("basic") part of the command, then wraps it in
 * either the v1 or v2 command layout depending on the device generation
 * (bt_session_2) before sending it to the uCode. Bails out with an error
 * message if no bt_params are available for this device.
 */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
	struct iwl_basic_bt_cmd basic = {
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
	};
	struct iwl_bt_cmd_v1 bt_cmd_v1;
	struct iwl_bt_cmd_v2 bt_cmd_v2;
	int ret;

	/* the lookup tables below are memcpy'd into basic.bt3_lookup_table */
	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
		     sizeof(basic.bt3_lookup_table));

	if (priv->lib->bt_params) {
		/*
		 * newer generation of devices (2000 series and newer)
		 * use the version 2 of the bt command
		 * we need to make sure sending the host command
		 * with correct data structure to avoid uCode assert
		 */
		if (priv->lib->bt_params->bt_session_2) {
			bt_cmd_v2.prio_boost = cpu_to_le32(
				priv->lib->bt_params->bt_prio_boost);
			bt_cmd_v2.tx_prio_boost = 0;
			bt_cmd_v2.rx_prio_boost = 0;
		} else {
			/* older version only has 8 bits */
			WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF);
			bt_cmd_v1.prio_boost =
				priv->lib->bt_params->bt_prio_boost;
			bt_cmd_v1.tx_prio_boost = 0;
			bt_cmd_v1.rx_prio_boost = 0;
		}
	} else {
		IWL_ERR(priv, "failed to construct BT Coex Config\n");
		return;
	}

	/*
	 * Possible situations when BT needs to take over for receive,
	 * at the same time where STA needs to response to AP's frame(s),
	 * reduce the tx power of the required response frames, by that,
	 * allow the concurrent BT receive & WiFi transmit
	 * (BT - ANT A, WiFi -ANT B), without interference to one another
	 *
	 * Reduced tx power apply to control frames only (ACK/Back/CTS)
	 * when indicated by the BT config command
	 */
	basic.kill_ack_mask = priv->kill_ack_mask;
	basic.kill_cts_mask = priv->kill_cts_mask;
	if (priv->reduced_txpower)
		basic.reduce_txpower = IWLAGN_BT_REDUCED_TX_PWR;
	basic.valid = priv->bt_valid;

	/*
	 * Configure BT coex mode to "no coexistence" when the
	 * user disabled BT coexistence, we have no interface
	 * (might be in monitor mode), or the interface is in
	 * IBSS mode (no proper uCode support for coex then).
	 */
	if (!iwlwifi_mod_params.bt_coex_active ||
	    priv->iw_mode == NL80211_IFTYPE_ADHOC) {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
	} else {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
					IWLAGN_BT_FLAG_COEX_MODE_SHIFT;

		/* PS poll support requires the uCode->BT sync signal */
		if (!priv->bt_enable_pspoll)
			basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
		else
			basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;

		if (priv->bt_ch_announce)
			basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
		IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
	}
	/* remember what we sent, used e.g. by the traffic-change worker */
	priv->bt_enable_flag = basic.flags;
	if (priv->bt_full_concurrent)
		memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
			sizeof(iwlagn_concurrent_lookup));
	else
		memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
			sizeof(iwlagn_def_3w_lookup));

	IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
		       basic.flags ? "active" : "disabled",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	/* wrap "basic" in the layout matching the device generation */
	if (priv->lib->bt_params->bt_session_2) {
		memcpy(&bt_cmd_v2.basic, &basic,
			sizeof(basic));
		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
					   0, sizeof(bt_cmd_v2), &bt_cmd_v2);
	} else {
		memcpy(&bt_cmd_v1.basic, &basic,
			sizeof(basic));
		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
					   0, sizeof(bt_cmd_v1), &bt_cmd_v1);
	}
	if (ret)
		IWL_ERR(priv, "failed to send BT Coex Config\n");

}
312*4882a593Smuzhiyun
/*
 * Enable or disable RSSI reporting used by BT coex dynamic PS poll,
 * moving the monitor to the currently associated station interface.
 */
void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
{
	struct iwl_rxon_context *ctx;
	struct iwl_rxon_context *sta_ctx = NULL;
	bool ap_active = false;

	lockdep_assert_held(&priv->mutex);

	/* Check whether AP or GO mode is active. */
	if (rssi_ena) {
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP &&
			    iwl_is_associated_ctx(ctx)) {
				ap_active = true;
				break;
			}
		}
	}

	/*
	 * If disable was requested, or if GO/AP mode is active, turn off
	 * RSSI measurements on whichever context currently has them.
	 */
	if (!rssi_ena || ap_active) {
		if (priv->cur_rssi_ctx) {
			ieee80211_disable_rssi_reports(priv->cur_rssi_ctx->vif);
			priv->cur_rssi_ctx = NULL;
		}
		return;
	}

	/*
	 * RSSI measurements are to be enabled: find an associated
	 * station interface to attach them to.
	 */
	for_each_context(priv, ctx) {
		if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
		    iwl_is_associated_ctx(ctx)) {
			sta_ctx = ctx;
			break;
		}
	}

	/*
	 * RSSI monitor already enabled for the correct interface
	 * (or correctly disabled) - nothing to do.
	 */
	if (sta_ctx == priv->cur_rssi_ctx)
		return;

	/*
	 * If the monitor is currently attached to a different context,
	 * disable it there before switching over.
	 */
	if (priv->cur_rssi_ctx && priv->cur_rssi_ctx->vif)
		ieee80211_disable_rssi_reports(priv->cur_rssi_ctx->vif);

	priv->cur_rssi_ctx = sta_ctx;

	if (!sta_ctx)
		return;

	ieee80211_enable_rssi_reports(sta_ctx->vif,
				      IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD,
				      IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD);
}
384*4882a593Smuzhiyun
iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg * uart_msg)385*4882a593Smuzhiyun static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
386*4882a593Smuzhiyun {
387*4882a593Smuzhiyun return (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
388*4882a593Smuzhiyun BT_UART_MSG_FRAME3SCOESCO_POS;
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun
/*
 * Worker run when the reported BT traffic load changes: picks an SMPS
 * (spatial multiplexing power save) mode matching the new load, updates
 * the chain flags, and re-evaluates the RSSI monitor. Skipped entirely
 * while a HW scan is in progress (rescheduled on scan completion).
 */
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_traffic_change_work);
	struct iwl_rxon_context *ctx;
	int smps_request = -1;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	/*
	 * Note: bt_traffic_load can be overridden by scan complete and
	 * coex profile notifications. Ignore that since only bad consequence
	 * can be not matching debug print with actual state.
	 */
	IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
		       priv->bt_traffic_load);

	/* map the BT traffic load onto an SMPS request; -1 = no change */
	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
		if (priv->bt_status)
			smps_request = IEEE80211_SMPS_DYNAMIC;
		else
			smps_request = IEEE80211_SMPS_AUTOMATIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		smps_request = IEEE80211_SMPS_DYNAMIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		smps_request = IEEE80211_SMPS_STATIC;
		break;
	default:
		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
			priv->bt_traffic_load);
		break;
	}

	mutex_lock(&priv->mutex);

	/*
	 * We can not send command to firmware while scanning. When the scan
	 * complete we will schedule this work again. We do check with mutex
	 * locked to prevent new scan request to arrive. We do not check
	 * STATUS_SCANNING to avoid race when queue_work two times from
	 * different notifications, but quit and not perform any work at all.
	 */
	if (test_bit(STATUS_SCAN_HW, &priv->status))
		goto out;

	iwl_update_chain_flags(priv);

	/* propagate the SMPS request to every active station interface */
	if (smps_request != -1) {
		priv->current_ht_config.smps = smps_request;
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
				ieee80211_request_smps(ctx->vif, smps_request);
		}
	}

	/*
	 * Dynamic PS poll related functionality. Adjust RSSI measurements if
	 * necessary.
	 */
	iwlagn_bt_coex_rssi_monitor(priv);
out:
	mutex_unlock(&priv->mutex);
}
461*4882a593Smuzhiyun
462*4882a593Smuzhiyun /*
463*4882a593Smuzhiyun * If BT sco traffic, and RSSI monitor is enabled, move measurements to the
464*4882a593Smuzhiyun * correct interface or disable it if this is the last interface to be
465*4882a593Smuzhiyun * removed.
466*4882a593Smuzhiyun */
iwlagn_bt_coex_rssi_monitor(struct iwl_priv * priv)467*4882a593Smuzhiyun void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv)
468*4882a593Smuzhiyun {
469*4882a593Smuzhiyun if (priv->bt_is_sco &&
470*4882a593Smuzhiyun priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS)
471*4882a593Smuzhiyun iwlagn_bt_adjust_rssi_monitor(priv, true);
472*4882a593Smuzhiyun else
473*4882a593Smuzhiyun iwlagn_bt_adjust_rssi_monitor(priv, false);
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun
/*
 * Dump every field of a BT UART coexistence message to the COEX debug
 * log, one frame per debug line. Pure diagnostics - no state changes.
 */
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X\n",
		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1MSGTYPE_POS,
		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1SSN_POS,
		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X\n",
		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2CHLSEQN_POS,
		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n",
		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SCOESCO_POS,
		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SNIFF_POS,
		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3A2DP_POS,
		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3ACL_POS,
		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3MASTER_POS,
		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n",
		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
			BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X\n",
		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5TXACTIVITY_POS,
		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5RXACTIVITY_POS,
		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n",
		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
			"0x%X, Inquiry = 0x%X, Connectable = 0x%X\n",
		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
		(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7PAGE_POS,
		(BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7INQUIRY_POS,
		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
544*4882a593Smuzhiyun
iwlagn_set_kill_msk(struct iwl_priv * priv,struct iwl_bt_uart_msg * uart_msg)545*4882a593Smuzhiyun static bool iwlagn_set_kill_msk(struct iwl_priv *priv,
546*4882a593Smuzhiyun struct iwl_bt_uart_msg *uart_msg)
547*4882a593Smuzhiyun {
548*4882a593Smuzhiyun bool need_update = false;
549*4882a593Smuzhiyun u8 kill_msk = IWL_BT_KILL_REDUCE;
550*4882a593Smuzhiyun static const __le32 bt_kill_ack_msg[3] = {
551*4882a593Smuzhiyun IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
552*4882a593Smuzhiyun IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
553*4882a593Smuzhiyun IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
554*4882a593Smuzhiyun static const __le32 bt_kill_cts_msg[3] = {
555*4882a593Smuzhiyun IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
556*4882a593Smuzhiyun IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
557*4882a593Smuzhiyun IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun if (!priv->reduced_txpower)
560*4882a593Smuzhiyun kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
561*4882a593Smuzhiyun ? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT;
562*4882a593Smuzhiyun if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
563*4882a593Smuzhiyun priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
564*4882a593Smuzhiyun priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
565*4882a593Smuzhiyun priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
566*4882a593Smuzhiyun priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
567*4882a593Smuzhiyun priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
568*4882a593Smuzhiyun need_update = true;
569*4882a593Smuzhiyun }
570*4882a593Smuzhiyun return need_update;
571*4882a593Smuzhiyun }
572*4882a593Smuzhiyun
573*4882a593Smuzhiyun /*
574*4882a593Smuzhiyun * Upon RSSI changes, sends a bt config command with following changes
575*4882a593Smuzhiyun * 1. enable/disable "reduced control frames tx power
576*4882a593Smuzhiyun * 2. update the "kill)ack_mask" and "kill_cts_mask"
577*4882a593Smuzhiyun *
578*4882a593Smuzhiyun * If "reduced tx power" is enabled, uCode shall
579*4882a593Smuzhiyun * 1. ACK/Back/CTS rate shall reduced to 6Mbps
580*4882a593Smuzhiyun * 2. not use duplciate 20/40MHz mode
581*4882a593Smuzhiyun */
iwlagn_fill_txpower_mode(struct iwl_priv * priv,struct iwl_bt_uart_msg * uart_msg)582*4882a593Smuzhiyun static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
583*4882a593Smuzhiyun struct iwl_bt_uart_msg *uart_msg)
584*4882a593Smuzhiyun {
585*4882a593Smuzhiyun bool need_update = false;
586*4882a593Smuzhiyun struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
587*4882a593Smuzhiyun int ave_rssi;
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
590*4882a593Smuzhiyun IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
591*4882a593Smuzhiyun return false;
592*4882a593Smuzhiyun }
593*4882a593Smuzhiyun
594*4882a593Smuzhiyun ave_rssi = ieee80211_ave_rssi(ctx->vif);
595*4882a593Smuzhiyun if (!ave_rssi) {
596*4882a593Smuzhiyun /* no rssi data, no changes to reduce tx power */
597*4882a593Smuzhiyun IWL_DEBUG_COEX(priv, "no rssi data available\n");
598*4882a593Smuzhiyun return need_update;
599*4882a593Smuzhiyun }
600*4882a593Smuzhiyun if (!priv->reduced_txpower &&
601*4882a593Smuzhiyun !iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
602*4882a593Smuzhiyun (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) &&
603*4882a593Smuzhiyun (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
604*4882a593Smuzhiyun BT_UART_MSG_FRAME3OBEX_MSK)) &&
605*4882a593Smuzhiyun !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
606*4882a593Smuzhiyun BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) {
607*4882a593Smuzhiyun /* enabling reduced tx power */
608*4882a593Smuzhiyun priv->reduced_txpower = true;
609*4882a593Smuzhiyun priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
610*4882a593Smuzhiyun need_update = true;
611*4882a593Smuzhiyun } else if (priv->reduced_txpower &&
612*4882a593Smuzhiyun (iwl_is_associated(priv, IWL_RXON_CTX_PAN) ||
613*4882a593Smuzhiyun (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) ||
614*4882a593Smuzhiyun (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
615*4882a593Smuzhiyun BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) ||
616*4882a593Smuzhiyun !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
617*4882a593Smuzhiyun BT_UART_MSG_FRAME3OBEX_MSK)))) {
618*4882a593Smuzhiyun /* disable reduced tx power */
619*4882a593Smuzhiyun priv->reduced_txpower = false;
620*4882a593Smuzhiyun priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
621*4882a593Smuzhiyun need_update = true;
622*4882a593Smuzhiyun }
623*4882a593Smuzhiyun
624*4882a593Smuzhiyun return need_update;
625*4882a593Smuzhiyun }
626*4882a593Smuzhiyun
/*
 * Rx handler for REPLY_BT_COEX_PROFILE_NOTIF: track the BT on/off state
 * and traffic load reported by the uCode, and queue the deferred work
 * that reconfigures the driver when something relevant changed.
 */
static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
					 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
	IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status);
	IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_COEX(priv, " CI compliance: %d\n",
		       coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	/* remember the previous load before it may be overwritten below */
	priv->last_bt_traffic_load = priv->bt_traffic_load;
	priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg);

	/* BT status/load changes are only acted upon outside IBSS mode */
	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					/* no channel announcement: assume
					 * the worst-case load */
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
	}

	/* schedule to send runtime bt_config */
	/* check reduce power before change ack/cts kill mask */
	if (iwlagn_fill_txpower_mode(priv, uart_msg) ||
	    iwlagn_set_kill_msk(priv, uart_msg))
		queue_work(priv->workqueue, &priv->bt_runtime_config);


	/* FIXME: based on notification, adjust the prio_boost */

	priv->bt_ci_compliance = coex->bt_ci_compliance;
}
682*4882a593Smuzhiyun
/* Register the BT coex profile notification handler in the Rx dispatch
 * table so the uCode notifications reach iwlagn_bt_coex_profile_notif(). */
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}
688*4882a593Smuzhiyun
/* Initialize the work item queued when the BT traffic load changes. */
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}
694*4882a593Smuzhiyun
/* Cancel the BT traffic change work and wait for a running instance
 * to finish (counterpart of iwlagn_bt_setup_deferred_work()). */
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}
699*4882a593Smuzhiyun
is_single_rx_stream(struct iwl_priv * priv)700*4882a593Smuzhiyun static bool is_single_rx_stream(struct iwl_priv *priv)
701*4882a593Smuzhiyun {
702*4882a593Smuzhiyun return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
703*4882a593Smuzhiyun priv->current_ht_config.single_chain_sufficient;
704*4882a593Smuzhiyun }
705*4882a593Smuzhiyun
/* Rx chain counts used by the helpers below: 3 chains when MIMO
 * reception is expected, 2 otherwise; the "idle" counts are used by
 * iwl_get_idle_rx_chain_count() depending on the SMPS mode. */
#define IWL_NUM_RX_CHAINS_MULTIPLE 3
#define IWL_NUM_RX_CHAINS_SINGLE 2
#define IWL_NUM_IDLE_CHAINS_DUAL 2
#define IWL_NUM_IDLE_CHAINS_SINGLE 1
710*4882a593Smuzhiyun
711*4882a593Smuzhiyun /*
712*4882a593Smuzhiyun * Determine how many receiver/antenna chains to use.
713*4882a593Smuzhiyun *
714*4882a593Smuzhiyun * More provides better reception via diversity. Fewer saves power
715*4882a593Smuzhiyun * at the expense of throughput, but only when not in powersave to
716*4882a593Smuzhiyun * start with.
717*4882a593Smuzhiyun *
718*4882a593Smuzhiyun * MIMO (dual stream) requires at least 2, but works better with 3.
719*4882a593Smuzhiyun * This does not determine *which* chains to use, just how many.
720*4882a593Smuzhiyun */
iwl_get_active_rx_chain_count(struct iwl_priv * priv)721*4882a593Smuzhiyun static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
722*4882a593Smuzhiyun {
723*4882a593Smuzhiyun if (priv->lib->bt_params &&
724*4882a593Smuzhiyun priv->lib->bt_params->advanced_bt_coexist &&
725*4882a593Smuzhiyun (priv->bt_full_concurrent ||
726*4882a593Smuzhiyun priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
727*4882a593Smuzhiyun /*
728*4882a593Smuzhiyun * only use chain 'A' in bt high traffic load or
729*4882a593Smuzhiyun * full concurrency mode
730*4882a593Smuzhiyun */
731*4882a593Smuzhiyun return IWL_NUM_RX_CHAINS_SINGLE;
732*4882a593Smuzhiyun }
733*4882a593Smuzhiyun /* # of Rx chains to use when expecting MIMO. */
734*4882a593Smuzhiyun if (is_single_rx_stream(priv))
735*4882a593Smuzhiyun return IWL_NUM_RX_CHAINS_SINGLE;
736*4882a593Smuzhiyun else
737*4882a593Smuzhiyun return IWL_NUM_RX_CHAINS_MULTIPLE;
738*4882a593Smuzhiyun }
739*4882a593Smuzhiyun
740*4882a593Smuzhiyun /*
741*4882a593Smuzhiyun * When we are in power saving mode, unless device support spatial
742*4882a593Smuzhiyun * multiplexing power save, use the active count for rx chain count.
743*4882a593Smuzhiyun */
iwl_get_idle_rx_chain_count(struct iwl_priv * priv,int active_cnt)744*4882a593Smuzhiyun static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
745*4882a593Smuzhiyun {
746*4882a593Smuzhiyun /* # Rx chains when idling, depending on SMPS mode */
747*4882a593Smuzhiyun switch (priv->current_ht_config.smps) {
748*4882a593Smuzhiyun case IEEE80211_SMPS_STATIC:
749*4882a593Smuzhiyun case IEEE80211_SMPS_DYNAMIC:
750*4882a593Smuzhiyun return IWL_NUM_IDLE_CHAINS_SINGLE;
751*4882a593Smuzhiyun case IEEE80211_SMPS_AUTOMATIC:
752*4882a593Smuzhiyun case IEEE80211_SMPS_OFF:
753*4882a593Smuzhiyun return active_cnt;
754*4882a593Smuzhiyun default:
755*4882a593Smuzhiyun WARN(1, "invalid SMPS mode %d",
756*4882a593Smuzhiyun priv->current_ht_config.smps);
757*4882a593Smuzhiyun return active_cnt;
758*4882a593Smuzhiyun }
759*4882a593Smuzhiyun }
760*4882a593Smuzhiyun
761*4882a593Smuzhiyun /* up to 4 chains */
iwl_count_chain_bitmap(u32 chain_bitmap)762*4882a593Smuzhiyun static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
763*4882a593Smuzhiyun {
764*4882a593Smuzhiyun u8 res;
765*4882a593Smuzhiyun res = (chain_bitmap & BIT(0)) >> 0;
766*4882a593Smuzhiyun res += (chain_bitmap & BIT(1)) >> 1;
767*4882a593Smuzhiyun res += (chain_bitmap & BIT(2)) >> 2;
768*4882a593Smuzhiyun res += (chain_bitmap & BIT(3)) >> 3;
769*4882a593Smuzhiyun return res;
770*4882a593Smuzhiyun }
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun /*
773*4882a593Smuzhiyun * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
774*4882a593Smuzhiyun *
775*4882a593Smuzhiyun * Selects how many and which Rx receivers/antennas/chains to use.
776*4882a593Smuzhiyun * This should not be used for scan command ... it puts data in wrong place.
777*4882a593Smuzhiyun */
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = is_single_rx_stream(priv);
	/* "cam" = device not in power-save (PMI bit clear) */
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 * checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->nvm_data->valid_rx_ant;

	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		active_chains = first_antenna(active_chains);
	}

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);


	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	/* pack the active and idle counts into the RXON rx_chain field */
	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	/* force MIMO only with multiple streams, enough active chains,
	 * and while not power-saving */
	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
840*4882a593Smuzhiyun
/*
 * Advance round-robin from 'ant' to the next antenna whose bit is set
 * in 'valid'.  Returns 0 while BT traffic is high on 2.4 GHz, and the
 * original 'ant' when no other valid antenna is found.
 */
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
{
	u8 next = ant;
	int i;

	if (priv->band == NL80211_BAND_2GHZ &&
	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
		return 0;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		/* wrap to antenna 0 past the last index */
		next = (next + 1 < RATE_ANT_NUM) ? next + 1 : 0;
		if (valid & BIT(next))
			return next;
	}

	return ant;
}
857*4882a593Smuzhiyun
858*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
/* Serialize the TKIP phase-1 key words as little endian for the uCode. */
static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
	const u16 *end = p1k + IWLAGN_P1K_SIZE;

	while (p1k < end)
		*out++ = cpu_to_le16(*p1k++);
}
866*4882a593Smuzhiyun
/* State shared between iwlagn_suspend() and the key iterator
 * iwlagn_wowlan_program_keys() (passed via ieee80211_iter_keys()). */
struct wowlan_key_data {
	struct iwl_rxon_context *ctx;	/* BSS context owning the keys */
	struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc; /* seq counters to upload */
	struct iwlagn_wowlan_tkip_params_cmd *tkip;	/* TKIP p1k/MIC material */
	const u8 *bssid;	/* used to derive TKIP RX phase-1 keys */
	/* error: a key failed to program; use_*: which commands to send */
	bool error, use_rsc_tsc, use_tkip;
};
874*4882a593Smuzhiyun
875*4882a593Smuzhiyun
iwlagn_wowlan_program_keys(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct ieee80211_key_conf * key,void * _data)876*4882a593Smuzhiyun static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
877*4882a593Smuzhiyun struct ieee80211_vif *vif,
878*4882a593Smuzhiyun struct ieee80211_sta *sta,
879*4882a593Smuzhiyun struct ieee80211_key_conf *key,
880*4882a593Smuzhiyun void *_data)
881*4882a593Smuzhiyun {
882*4882a593Smuzhiyun struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
883*4882a593Smuzhiyun struct wowlan_key_data *data = _data;
884*4882a593Smuzhiyun struct iwl_rxon_context *ctx = data->ctx;
885*4882a593Smuzhiyun struct aes_sc *aes_sc, *aes_tx_sc = NULL;
886*4882a593Smuzhiyun struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
887*4882a593Smuzhiyun struct iwlagn_p1k_cache *rx_p1ks;
888*4882a593Smuzhiyun u8 *rx_mic_key;
889*4882a593Smuzhiyun struct ieee80211_key_seq seq;
890*4882a593Smuzhiyun u32 cur_rx_iv32 = 0;
891*4882a593Smuzhiyun u16 p1k[IWLAGN_P1K_SIZE];
892*4882a593Smuzhiyun int ret, i;
893*4882a593Smuzhiyun
894*4882a593Smuzhiyun mutex_lock(&priv->mutex);
895*4882a593Smuzhiyun
896*4882a593Smuzhiyun if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
897*4882a593Smuzhiyun key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
898*4882a593Smuzhiyun !sta && !ctx->key_mapping_keys)
899*4882a593Smuzhiyun ret = iwl_set_default_wep_key(priv, ctx, key);
900*4882a593Smuzhiyun else
901*4882a593Smuzhiyun ret = iwl_set_dynamic_key(priv, ctx, key, sta);
902*4882a593Smuzhiyun
903*4882a593Smuzhiyun if (ret) {
904*4882a593Smuzhiyun IWL_ERR(priv, "Error setting key during suspend!\n");
905*4882a593Smuzhiyun data->error = true;
906*4882a593Smuzhiyun }
907*4882a593Smuzhiyun
908*4882a593Smuzhiyun switch (key->cipher) {
909*4882a593Smuzhiyun case WLAN_CIPHER_SUITE_TKIP:
910*4882a593Smuzhiyun if (sta) {
911*4882a593Smuzhiyun u64 pn64;
912*4882a593Smuzhiyun
913*4882a593Smuzhiyun tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
914*4882a593Smuzhiyun tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
915*4882a593Smuzhiyun
916*4882a593Smuzhiyun rx_p1ks = data->tkip->rx_uni;
917*4882a593Smuzhiyun
918*4882a593Smuzhiyun pn64 = atomic64_read(&key->tx_pn);
919*4882a593Smuzhiyun tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
920*4882a593Smuzhiyun tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
921*4882a593Smuzhiyun
922*4882a593Smuzhiyun ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
923*4882a593Smuzhiyun iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
924*4882a593Smuzhiyun
925*4882a593Smuzhiyun memcpy(data->tkip->mic_keys.tx,
926*4882a593Smuzhiyun &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
927*4882a593Smuzhiyun IWLAGN_MIC_KEY_SIZE);
928*4882a593Smuzhiyun
929*4882a593Smuzhiyun rx_mic_key = data->tkip->mic_keys.rx_unicast;
930*4882a593Smuzhiyun } else {
931*4882a593Smuzhiyun tkip_sc =
932*4882a593Smuzhiyun data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
933*4882a593Smuzhiyun rx_p1ks = data->tkip->rx_multi;
934*4882a593Smuzhiyun rx_mic_key = data->tkip->mic_keys.rx_mcast;
935*4882a593Smuzhiyun }
936*4882a593Smuzhiyun
937*4882a593Smuzhiyun /*
938*4882a593Smuzhiyun * For non-QoS this relies on the fact that both the uCode and
939*4882a593Smuzhiyun * mac80211 use TID 0 (as they need to to avoid replay attacks)
940*4882a593Smuzhiyun * for checking the IV in the frames.
941*4882a593Smuzhiyun */
942*4882a593Smuzhiyun for (i = 0; i < IWLAGN_NUM_RSC; i++) {
943*4882a593Smuzhiyun ieee80211_get_key_rx_seq(key, i, &seq);
944*4882a593Smuzhiyun tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
945*4882a593Smuzhiyun tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
946*4882a593Smuzhiyun /* wrapping isn't allowed, AP must rekey */
947*4882a593Smuzhiyun if (seq.tkip.iv32 > cur_rx_iv32)
948*4882a593Smuzhiyun cur_rx_iv32 = seq.tkip.iv32;
949*4882a593Smuzhiyun }
950*4882a593Smuzhiyun
951*4882a593Smuzhiyun ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
952*4882a593Smuzhiyun iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
953*4882a593Smuzhiyun ieee80211_get_tkip_rx_p1k(key, data->bssid,
954*4882a593Smuzhiyun cur_rx_iv32 + 1, p1k);
955*4882a593Smuzhiyun iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
956*4882a593Smuzhiyun
957*4882a593Smuzhiyun memcpy(rx_mic_key,
958*4882a593Smuzhiyun &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
959*4882a593Smuzhiyun IWLAGN_MIC_KEY_SIZE);
960*4882a593Smuzhiyun
961*4882a593Smuzhiyun data->use_tkip = true;
962*4882a593Smuzhiyun data->use_rsc_tsc = true;
963*4882a593Smuzhiyun break;
964*4882a593Smuzhiyun case WLAN_CIPHER_SUITE_CCMP:
965*4882a593Smuzhiyun if (sta) {
966*4882a593Smuzhiyun u64 pn64;
967*4882a593Smuzhiyun
968*4882a593Smuzhiyun aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
969*4882a593Smuzhiyun aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
970*4882a593Smuzhiyun
971*4882a593Smuzhiyun pn64 = atomic64_read(&key->tx_pn);
972*4882a593Smuzhiyun aes_tx_sc->pn = cpu_to_le64(pn64);
973*4882a593Smuzhiyun } else
974*4882a593Smuzhiyun aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
975*4882a593Smuzhiyun
976*4882a593Smuzhiyun /*
977*4882a593Smuzhiyun * For non-QoS this relies on the fact that both the uCode and
978*4882a593Smuzhiyun * mac80211 use TID 0 for checking the IV in the frames.
979*4882a593Smuzhiyun */
980*4882a593Smuzhiyun for (i = 0; i < IWLAGN_NUM_RSC; i++) {
981*4882a593Smuzhiyun u8 *pn = seq.ccmp.pn;
982*4882a593Smuzhiyun
983*4882a593Smuzhiyun ieee80211_get_key_rx_seq(key, i, &seq);
984*4882a593Smuzhiyun aes_sc[i].pn = cpu_to_le64(
985*4882a593Smuzhiyun (u64)pn[5] |
986*4882a593Smuzhiyun ((u64)pn[4] << 8) |
987*4882a593Smuzhiyun ((u64)pn[3] << 16) |
988*4882a593Smuzhiyun ((u64)pn[2] << 24) |
989*4882a593Smuzhiyun ((u64)pn[1] << 32) |
990*4882a593Smuzhiyun ((u64)pn[0] << 40));
991*4882a593Smuzhiyun }
992*4882a593Smuzhiyun data->use_rsc_tsc = true;
993*4882a593Smuzhiyun break;
994*4882a593Smuzhiyun }
995*4882a593Smuzhiyun
996*4882a593Smuzhiyun mutex_unlock(&priv->mutex);
997*4882a593Smuzhiyun }
998*4882a593Smuzhiyun
iwlagn_send_patterns(struct iwl_priv * priv,struct cfg80211_wowlan * wowlan)999*4882a593Smuzhiyun int iwlagn_send_patterns(struct iwl_priv *priv,
1000*4882a593Smuzhiyun struct cfg80211_wowlan *wowlan)
1001*4882a593Smuzhiyun {
1002*4882a593Smuzhiyun struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
1003*4882a593Smuzhiyun struct iwl_host_cmd cmd = {
1004*4882a593Smuzhiyun .id = REPLY_WOWLAN_PATTERNS,
1005*4882a593Smuzhiyun .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1006*4882a593Smuzhiyun };
1007*4882a593Smuzhiyun int i, err;
1008*4882a593Smuzhiyun
1009*4882a593Smuzhiyun if (!wowlan->n_patterns)
1010*4882a593Smuzhiyun return 0;
1011*4882a593Smuzhiyun
1012*4882a593Smuzhiyun cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
1015*4882a593Smuzhiyun if (!pattern_cmd)
1016*4882a593Smuzhiyun return -ENOMEM;
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun for (i = 0; i < wowlan->n_patterns; i++) {
1021*4882a593Smuzhiyun int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
1022*4882a593Smuzhiyun
1023*4882a593Smuzhiyun memcpy(&pattern_cmd->patterns[i].mask,
1024*4882a593Smuzhiyun wowlan->patterns[i].mask, mask_len);
1025*4882a593Smuzhiyun memcpy(&pattern_cmd->patterns[i].pattern,
1026*4882a593Smuzhiyun wowlan->patterns[i].pattern,
1027*4882a593Smuzhiyun wowlan->patterns[i].pattern_len);
1028*4882a593Smuzhiyun pattern_cmd->patterns[i].mask_size = mask_len;
1029*4882a593Smuzhiyun pattern_cmd->patterns[i].pattern_size =
1030*4882a593Smuzhiyun wowlan->patterns[i].pattern_len;
1031*4882a593Smuzhiyun }
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun cmd.data[0] = pattern_cmd;
1034*4882a593Smuzhiyun err = iwl_dvm_send_cmd(priv, &cmd);
1035*4882a593Smuzhiyun kfree(pattern_cmd);
1036*4882a593Smuzhiyun return err;
1037*4882a593Smuzhiyun }
1038*4882a593Smuzhiyun
/*
 * Transition the device into WoWLAN (D3) operation: restart the
 * hardware with the WoWLAN uCode, replay the RXON/power configuration,
 * upload crypto material and program the wakeup triggers and patterns
 * requested by cfg80211.  Returns 0 on success or a negative errno.
 */
int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
{
	struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
	struct iwl_rxon_cmd rxon;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
	struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
	struct iwlagn_d3_config_cmd d3_cfg_cmd = {
		/*
		 * Program the minimum sleep time to 10 seconds, as many
		 * platforms have issues processing a wakeup signal while
		 * still being in the process of suspending.
		 */
		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
	};
	struct wowlan_key_data key_data = {
		.ctx = ctx,
		.bssid = ctx->active.bssid_addr,
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
	};
	int ret, i;
	u16 seq;

	/* heap-allocate the sequence counter block rather than stacking it */
	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));

	/*
	 * We know the last used seqno, and the uCode expects to know that
	 * one, it will increment before TX.
	 */
	seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
	wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);

	/*
	 * For QoS counters, we store the one to use next, so subtract 0x10
	 * since the uCode will add 0x10 before using the value.
	 */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		seq = priv->tid_data[IWL_AP_ID][i].seq_number;
		seq -= 0x10;
		wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
	}

	/* translate the cfg80211 wakeup triggers into uCode filter flags */
	if (wowlan->disconnect)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
				    IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		d3_cfg_cmd.wakeup_flags |=
			cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);

	/* stop any scan before reprogramming the device */
	iwl_scan_cancel_timeout(priv, 200);

	/* save the active RXON so it can be replayed on the WoWLAN uCode */
	memcpy(&rxon, &ctx->active, sizeof(rxon));

	/* restart the hardware so the WoWLAN uCode can be loaded */
	priv->ucode_loaded = false;
	iwl_trans_stop_device(priv->trans);
	ret = iwl_trans_start_hw(priv->trans);
	if (ret)
		goto out;

	priv->wowlan = true;

	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
	if (ret)
		goto out;

	/* now configure WoWLAN ucode */
	ret = iwl_alive_start(priv);
	if (ret)
		goto out;

	memcpy(&ctx->staging, &rxon, sizeof(rxon));
	ret = iwlagn_commit_rxon(priv, ctx);
	if (ret)
		goto out;

	ret = iwl_power_update_mode(priv, true);
	if (ret)
		goto out;

	/* crypto material is only uploaded when using hardware crypto */
	if (!iwlwifi_mod_params.swcrypto) {
		/* mark all keys clear */
		priv->ucode_key_table = 0;
		ctx->key_mapping_keys = 0;

		/*
		 * This needs to be unlocked due to lock ordering
		 * constraints. Since we're in the suspend path
		 * that isn't really a problem though.
		 */
		mutex_unlock(&priv->mutex);
		ieee80211_iter_keys(priv->hw, ctx->vif,
				    iwlagn_wowlan_program_keys,
				    &key_data);
		mutex_lock(&priv->mutex);
		if (key_data.error) {
			ret = -EIO;
			goto out;
		}

		if (key_data.use_rsc_tsc) {
			struct iwl_host_cmd rsc_tsc_cmd = {
				.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
				.data[0] = key_data.rsc_tsc,
				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
				.len[0] = sizeof(*key_data.rsc_tsc),
			};

			ret = iwl_dvm_send_cmd(priv, &rsc_tsc_cmd);
			if (ret)
				goto out;
		}

		if (key_data.use_tkip) {
			ret = iwl_dvm_send_cmd_pdu(priv,
						   REPLY_WOWLAN_TKIP_PARAMS,
						   0, sizeof(tkip_cmd),
						   &tkip_cmd);
			if (ret)
				goto out;
		}

		/* upload KEK/KCK so the uCode can handle GTK rekeying */
		if (priv->have_rekey_data) {
			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
			memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
			memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
			kek_kck_cmd.replay_ctr = priv->replay_ctr;

			ret = iwl_dvm_send_cmd_pdu(priv,
						   REPLY_WOWLAN_KEK_KCK_MATERIAL,
						   0, sizeof(kek_kck_cmd),
						   &kek_kck_cmd);
			if (ret)
				goto out;
		}
	}

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0,
				   sizeof(d3_cfg_cmd), &d3_cfg_cmd);
	if (ret)
		goto out;

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
				   0, sizeof(wakeup_filter_cmd),
				   &wakeup_filter_cmd);
	if (ret)
		goto out;

	ret = iwlagn_send_patterns(priv, wowlan);
 out:
	kfree(key_data.rsc_tsc);
	return ret;
}
1216*4882a593Smuzhiyun #endif
1217*4882a593Smuzhiyun
iwl_dvm_send_cmd(struct iwl_priv * priv,struct iwl_host_cmd * cmd)1218*4882a593Smuzhiyun int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1219*4882a593Smuzhiyun {
1220*4882a593Smuzhiyun if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
1221*4882a593Smuzhiyun IWL_WARN(priv, "Not sending command - %s KILL\n",
1222*4882a593Smuzhiyun iwl_is_rfkill(priv) ? "RF" : "CT");
1223*4882a593Smuzhiyun return -EIO;
1224*4882a593Smuzhiyun }
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun if (test_bit(STATUS_FW_ERROR, &priv->status)) {
1227*4882a593Smuzhiyun IWL_ERR(priv, "Command %s failed: FW Error\n",
1228*4882a593Smuzhiyun iwl_get_cmd_string(priv->trans, cmd->id));
1229*4882a593Smuzhiyun return -EIO;
1230*4882a593Smuzhiyun }
1231*4882a593Smuzhiyun
1232*4882a593Smuzhiyun /*
1233*4882a593Smuzhiyun * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
1234*4882a593Smuzhiyun * in iwl_down but cancel the workers only later.
1235*4882a593Smuzhiyun */
1236*4882a593Smuzhiyun if (!priv->ucode_loaded) {
1237*4882a593Smuzhiyun IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
1238*4882a593Smuzhiyun return -EIO;
1239*4882a593Smuzhiyun }
1240*4882a593Smuzhiyun
1241*4882a593Smuzhiyun /*
1242*4882a593Smuzhiyun * Synchronous commands from this op-mode must hold
1243*4882a593Smuzhiyun * the mutex, this ensures we don't try to send two
1244*4882a593Smuzhiyun * (or more) synchronous commands at a time.
1245*4882a593Smuzhiyun */
1246*4882a593Smuzhiyun if (!(cmd->flags & CMD_ASYNC))
1247*4882a593Smuzhiyun lockdep_assert_held(&priv->mutex);
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun return iwl_trans_send_cmd(priv->trans, cmd);
1250*4882a593Smuzhiyun }
1251*4882a593Smuzhiyun
/* Convenience wrapper: build a single-fragment host command around a
 * flat data buffer and send it through iwl_dvm_send_cmd(). */
int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
			 u32 flags, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.flags = flags,
	};

	cmd.len[0] = len;
	cmd.data[0] = data;

	return iwl_dvm_send_cmd(priv, &cmd);
}
1264