/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "fw/dbg.h"

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

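	/*
	 * Mark the device as enabled; iwl_pcie_gen2_apm_stop() and
	 * _iwl_trans_pcie_gen2_stop_device() test this bit to know whether
	 * the APM was brought up and therefore needs to be torn down.
	 */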
	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

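	/*
	 * When the op mode is leaving, signal the management engine (ME)
	 * that the driver is releasing the device; the APM must be up for
	 * that handshake, so bring it up first if needed.
	 */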
	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_gen2_apm_init(trans);

		/* inform ME that we are leaving */
		iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
			    CSR_RESET_LINK_PWR_MGMT_DISABLED);
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE |
			    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
		mdelay(1);
		iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
			      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	iwl_trans_sw_reset(trans);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_txq_gen2_tx_stop(trans);
		iwl_pcie_rx_stop(trans);
	}

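	/*
	 * Release the paging memory and the context info, i.e. the
	 * structures that described the firmware image to the device;
	 * the context info layout changed with the AX210 family, hence
	 * the two free paths below.
	 */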
	iwl_pcie_ctxt_info_free_paging(trans);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		iwl_pcie_ctxt_info_gen3_free(trans);
	else
		iwl_pcie_ctxt_info_free(trans);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_gen2_apm_stop(trans, false);

	iwl_trans_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending the HW RF kill interrupt all
	 * the time unless it is ACKed, even when the interrupt should be
	 * masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_gen2_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
			       trans->cfg->min_txq_size);
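	/* use the larger of the default command queue size and the
	 * device's minimum TX queue size
	 */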

	/* TODO: most of the logic can be removed in A0 - but not in Z0 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_gen2_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (iwl_pcie_gen2_rx_init(trans))
		return -ENOMEM;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))
		return -ENOMEM;

	/* enable shadow regs in HW */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
	IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
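	/*
	 * With shadow registers enabled, the driver can update the TX
	 * queue write pointers without explicitly waking the device for
	 * each update.
	 */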

	return 0;
}

void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_pcie_reset_ict(trans);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* now that we got alive we can free the fw image & the context info.
	 * paging memory cannot be freed, since the FW will still use it
	 */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		iwl_pcie_ctxt_info_free(trans);

	/*
	 * Re-enable all the interrupts, including the RF-Kill one, now that
	 * the firmware is alive.
	 */
	iwl_enable_interrupts(trans);
	mutex_lock(&trans_pcie->mutex);
	iwl_pcie_check_hw_rf_kill(trans);
	mutex_unlock(&trans_pcie->mutex);
}

static void iwl_pcie_set_ltr(struct iwl_trans *trans)
{
	u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
		      u32_encode_bits(250,
				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
		      CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
				      CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
		      u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);

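	/*
	 * ltr_val above packs both the snoop and no-snoop latency
	 * requirements (250, scaled in microseconds) into a single
	 * register value, mirroring the fields of a PCIe LTR message.
	 */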
	/*
	 * To work around hardware latency issues during the boot process,
	 * initialize the LTR to ~250 usec (see ltr_val above).
	 * The firmware initializes this again later (to a smaller value).
	 */
	if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
	     trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
	    !trans->trans_cfg->integrated) {
		iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
	} else if (trans->trans_cfg->integrated &&
		   trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
		iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
		iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
	}
}

int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again) any pending host interrupts; they are re-enabled
	 * once the firmware reports it is alive
	 */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_gen2_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

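	/*
	 * The context info describes the firmware image (and its paging
	 * memory) to the device, which then DMAs the image by itself;
	 * the driver does not load the sections one by one here.
	 */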
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
	else
		ret = iwl_pcie_ctxt_info_init(trans, fw);
	if (ret)
		goto out;

	iwl_pcie_set_ltr(trans);

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
	else
		iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
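	/*
	 * Writing UREG_CPU_INIT_RUN kicks the embedded CPU; once the
	 * firmware is up, its ALIVE notification leads the op mode to
	 * call iwl_trans_pcie_gen2_fw_alive() above.
	 */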

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}