// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */

#include <linux/vmalloc.h>
#include <linux/delay.h>

#include "qla_def.h"
#include "qla_gbl.h"

#define TIMEOUT_100_MS 100

static const uint32_t qla8044_reg_tbl[] = {
	QLA8044_PEG_HALT_STATUS1,
	QLA8044_PEG_HALT_STATUS2,
	QLA8044_PEG_ALIVE_COUNTER,
	QLA8044_CRB_DRV_ACTIVE,
	QLA8044_CRB_DEV_STATE,
	QLA8044_CRB_DRV_STATE,
	QLA8044_CRB_DRV_SCRATCH,
	QLA8044_CRB_DEV_PART_INFO1,
	QLA8044_CRB_IDC_VER_MAJOR,
	QLA8044_FW_VER_MAJOR,
	QLA8044_FW_VER_MINOR,
	QLA8044_FW_VER_SUB,
	QLA8044_CMDPEG_STATE,
	QLA8044_ASIC_TEMP,
};

/* 8044 Flash Read/Write functions */
uint32_t
qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
{
	return readl((void __iomem *)(ha->nx_pcibase + addr));
}

void
qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
{
	writel(val, (void __iomem *)(ha->nx_pcibase + addr));
}

int
qla8044_rd_direct(struct scsi_qla_host *vha,
	const uint32_t crb_reg)
{
	struct qla_hw_data *ha = vha->hw;

	if (crb_reg < CRB_REG_INDEX_MAX)
		return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
	else
		return QLA_FUNCTION_FAILED;
}

void
qla8044_wr_direct(struct scsi_qla_host *vha,
	const uint32_t crb_reg,
	const uint32_t value)
{
	struct qla_hw_data *ha = vha->hw;

	if (crb_reg < CRB_REG_INDEX_MAX)
		qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
}

static int
qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
{
	uint32_t val;
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

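	/* Program the CRB window base for this function, then read it back
	 * to confirm the window actually moved before any indirect access.
	 */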
	qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
	val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));

	if (val != addr) {
		ql_log(ql_log_warn, vha, 0xb087,
		    "%s: Failed to set register window : "
		    "addr written 0x%x, read 0x%x!\n",
		    __func__, addr, val);
		ret_val = QLA_FUNCTION_FAILED;
	}
	return ret_val;
}

static int
qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
{
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ret_val = qla8044_set_win_base(vha, addr);
	if (!ret_val)
		*data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
	else
		ql_log(ql_log_warn, vha, 0xb088,
		    "%s: failed read of addr 0x%x!\n", __func__, addr);
	return ret_val;
}

static int
qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
{
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ret_val = qla8044_set_win_base(vha, addr);
	if (!ret_val)
		qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
	else
		ql_log(ql_log_warn, vha, 0xb089,
		    "%s: failed wrt to addr 0x%x, data 0x%x\n",
		    __func__, addr, data);
	return ret_val;
}

/*
 * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
 *
 * @vha : Pointer to adapter structure
 * @raddr : CRB address to read from
 * @waddr : CRB address to write to
 *
 */
static void
qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
	uint32_t raddr, uint32_t waddr)
{
	uint32_t value;

	qla8044_rd_reg_indirect(vha, raddr, &value);
	qla8044_wr_reg_indirect(vha, waddr, value);
}

static int
qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
	uint32_t mask)
{
	unsigned long timeout;
	uint32_t temp;

	/* jiffies after 100ms */
	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
	do {
		qla8044_rd_reg_indirect(vha, addr1, &temp);
		if ((temp & mask) != 0)
			break;
		if (time_after_eq(jiffies, timeout)) {
			ql_log(ql_log_warn, vha, 0xb151,
			    "Error in processing rdmdio entry\n");
			return -1;
		}
	} while (1);

	return 0;
}

static uint32_t
qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
	uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
{
	uint32_t temp;
	int ret = 0;

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return -1;

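	/* Bit 30 of the command word (0x40000000) appears to request an MDIO
	 * read of the supplied address; the result is fetched from addr3 once
	 * the interface reports ready again.
	 */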
	temp = (0x40000000 | addr);
	qla8044_wr_reg_indirect(vha, addr1, temp);

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return 0;

	qla8044_rd_reg_indirect(vha, addr3, &ret);

	return ret;
}


static int
qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
	uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
{
	unsigned long timeout;
	uint32_t temp;

	/* jiffies after 100 msecs */
	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
	do {
		temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
		if ((temp & 0x1) != 1)
			break;
		if (time_after_eq(jiffies, timeout)) {
			ql_log(ql_log_warn, vha, 0xb152,
			    "Error in processing mdiobus idle\n");
			return -1;
		}
	} while (1);

	return 0;
}

static int
qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
	uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
{
	int ret = 0;

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return -1;

	qla8044_wr_reg_indirect(vha, addr3, value);
	qla8044_wr_reg_indirect(vha, addr1, addr);

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return -1;

	return 0;
}
/*
 * qla8044_rmw_crb_reg - Read value from raddr, AND it with test_mask,
 * shift left/right and OR/XOR it with the values from the RMW header,
 * then write the result to waddr.
 *
 * @vha : Pointer to adapter structure
 * @raddr : CRB address to read from
 * @waddr : CRB address to write to
 * @p_rmw_hdr : header with shift/or/xor values.
 *
 */
static void
qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
	uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr)
{
	uint32_t value;

	if (p_rmw_hdr->index_a)
		value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
	else
		qla8044_rd_reg_indirect(vha, raddr, &value);
	value &= p_rmw_hdr->test_mask;
	value <<= p_rmw_hdr->shl;
	value >>= p_rmw_hdr->shr;
	value |= p_rmw_hdr->or_value;
	value ^= p_rmw_hdr->xor_value;
	qla8044_wr_reg_indirect(vha, waddr, value);
	return;
}

static inline void
qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
{
	uint32_t qsnt_state;
	struct qla_hw_data *ha = vha->hw;

	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
	qsnt_state |= (1 << ha->portnum);
	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
	ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
	    __func__, vha->host_no, qsnt_state);
}

void
qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
{
	uint32_t qsnt_state;
	struct qla_hw_data *ha = vha->hw;

	qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
	qsnt_state &= ~(1 << ha->portnum);
	qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
	ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
	    __func__, vha->host_no, qsnt_state);
}

/**
 * qla8044_lock_recovery - Recovers the idc_lock.
 * @vha : Pointer to adapter structure
 *
 * Lock Recovery Register
 * 5-2	Lock recovery owner: Function ID of driver doing lock recovery,
 *	valid if bits 1..0 are set by driver doing lock recovery.
 * 1-0	1 - Driver intends to force unlock the IDC lock.
 *	2 - Driver is moving forward to unlock the IDC lock. Driver clears
 *	    this field after force unlocking the IDC lock.
 *
 * Lock Recovery process
 * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
 *    greater than 0, then wait for the other driver to unlock otherwise
 *    move to the next step.
 * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
 *    register bits 1..0 and also set the function# in bits 5..2.
 * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
 *    Wait for the other driver to perform lock recovery if the function
 *    number in bits 5..2 has changed, otherwise move to the next step.
 * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
 *    leaving your function# in bits 5..2.
 * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
 *    the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
 **/
static int
qla8044_lock_recovery(struct scsi_qla_host *vha)
{
	uint32_t lock = 0, lockid;
	struct qla_hw_data *ha = vha->hw;

	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);

	/* Check for other Recovery in progress, go wait */
	if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
		return QLA_FUNCTION_FAILED;

	/* Intent to Recover */
	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
	    (ha->portnum <<
	     IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
	msleep(200);

	/* Check Intent to Recover is advertised */
	lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
	if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
	    IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_p3p, vha, 0xb08B, "%s:%d: IDC Lock recovery initiated\n",
	    __func__, ha->portnum);

	/* Proceed to Recover */
	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
	    (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
	    PROCEED_TO_RECOVER);

	/* Force Unlock() */
	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);

	/* Clear bits 0-5 in IDC_RECOVERY register*/
	qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);

	/* Get lock() */
	lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
	if (lock) {
		lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
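		/* Bump the lock counter (bits 8-31) and record this function
		 * as the new owner in bits 0-7.
		 */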
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
		qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
		return QLA_SUCCESS;
	} else
		return QLA_FUNCTION_FAILED;
}

int
qla8044_idc_lock(struct qla_hw_data *ha)
{
	uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
	uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	while (status == 0) {
		/* acquire semaphore5 from PCI HW block */
		status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);

		if (status) {
			/* Increment Counter (8-31) and update func_num (0-7)
			 * on getting a successful lock */
			lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
			qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
			break;
		}

		if (timeout == 0)
			first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);

		if (++timeout >=
		    (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
			tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
			func_num = tmo_owner & 0xFF;
			lock_cnt = tmo_owner >> 8;
			ql_log(ql_log_warn, vha, 0xb114,
			    "%s: Lock by func %d failed after 2s, lock held "
			    "by func %d, lock count %d, first_owner %d\n",
			    __func__, ha->portnum, func_num, lock_cnt,
			    (first_owner & 0xFF));
			if (first_owner != tmo_owner) {
				/* Some other driver got lock,
				 * OR same driver got lock again (counter
				 * value changed), when we were waiting for
				 * lock. Retry for another 2 sec */
				ql_dbg(ql_dbg_p3p, vha, 0xb115,
				    "%s: %d: IDC lock failed\n",
				    __func__, ha->portnum);
				timeout = 0;
			} else {
				/* Same driver holding lock > 2sec.
				 * Force Recovery */
				if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
					/* Recovered and got lock */
					ret_val = QLA_SUCCESS;
					ql_dbg(ql_dbg_p3p, vha, 0xb116,
					    "%s: IDC lock Recovery by %d "
					    "successful...\n", __func__,
					    ha->portnum);
				}
				/* Recovery Failed, some other function
				 * has the lock, wait for 2secs
				 * and retry
				 */
				ql_dbg(ql_dbg_p3p, vha, 0xb08a,
				    "%s: IDC lock Recovery by %d "
				    "failed, Retrying timeout\n", __func__,
				    ha->portnum);
				timeout = 0;
			}
		}
		msleep(QLA8044_DRV_LOCK_MSLEEP);
	}
	return ret_val;
}

void
qla8044_idc_unlock(struct qla_hw_data *ha)
{
	int id;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);

	if ((id & 0xFF) != ha->portnum) {
		ql_log(ql_log_warn, vha, 0xb118,
		    "%s: IDC Unlock by %d failed, lock owner is %d!\n",
		    __func__, ha->portnum, (id & 0xFF));
		return;
	}

	/* Keep lock counter value, update the ha->func_num to 0xFF */
	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
}

/* 8044 Flash Lock/Unlock functions */
static int
qla8044_flash_lock(scsi_qla_host_t *vha)
{
	int lock_owner;
	int timeout = 0;
	uint32_t lock_status = 0;
	int ret_val = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	while (lock_status == 0) {
		lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
		if (lock_status)
			break;

		if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
			lock_owner = qla8044_rd_reg(ha,
			    QLA8044_FLASH_LOCK_ID);
			ql_log(ql_log_warn, vha, 0xb113,
			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
			    __func__, ha->portnum, lock_owner);
			ret_val = QLA_FUNCTION_FAILED;
			break;
		}
		msleep(20);
	}
	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
	return ret_val;
}

static void
qla8044_flash_unlock(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	/* Reading FLASH_UNLOCK register unlocks the Flash */
	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
	qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
}


static
void qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
{

	if (qla8044_flash_lock(vha)) {
		/* Someone else is holding the lock. */
		ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
	}

	/*
	 * Either we got the lock, or someone
	 * else died while holding it.
	 * In either case, unlock.
	 */
	qla8044_flash_unlock(vha);
}

/*
 * Address and length are byte address
 */
static int
qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
	uint32_t flash_addr, int u32_word_count)
{
	int i, ret_val = QLA_SUCCESS;
	uint32_t u32_word;

	if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_lock_error;
	}

	if (flash_addr & 0x03) {
		ql_log(ql_log_warn, vha, 0xb117,
		    "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_flash_read;
	}

	for (i = 0; i < u32_word_count; i++) {
		if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
		    (flash_addr & 0xFFFF0000))) {
			ql_log(ql_log_warn, vha, 0xb119,
			    "%s: failed to write addr 0x%x to "
			    "FLASH_DIRECT_WINDOW!\n",
			    __func__, flash_addr);
			ret_val = QLA_FUNCTION_FAILED;
			goto exit_flash_read;
		}

		ret_val = qla8044_rd_reg_indirect(vha,
		    QLA8044_FLASH_DIRECT_DATA(flash_addr),
		    &u32_word);
		if (ret_val != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0xb08c,
			    "%s: failed to read addr 0x%x!\n",
			    __func__, flash_addr);
			goto exit_flash_read;
		}

		*(uint32_t *)p_data = u32_word;
		p_data = p_data + 4;
		flash_addr = flash_addr + 4;
	}

exit_flash_read:
	qla8044_flash_unlock(vha);

exit_lock_error:
	return ret_val;
}

/*
 * Address and length are byte address
 */
void *
qla8044_read_optrom_data(struct scsi_qla_host *vha, void *buf,
	uint32_t offset, uint32_t length)
{
	scsi_block_requests(vha->host);
	if (qla8044_read_flash_data(vha, buf, offset, length / 4)
	    != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb08d,
		    "%s: Failed to read from flash\n",
		    __func__);
	}
	scsi_unblock_requests(vha->host);
	return buf;
}

static inline int
qla8044_need_reset(struct scsi_qla_host *vha)
{
	uint32_t drv_state, drv_active;
	int rval;
	struct qla_hw_data *ha = vha->hw;

	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
	drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);

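	/* A reset is needed if this function's bit is set in DRV_STATE, or if
	 * EEH recovery is in progress while any driver is still active.
	 */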
	rval = drv_state & (1 << ha->portnum);

	if (ha->flags.eeh_busy && drv_active)
		rval = 1;
	return rval;
}

/*
 * qla8044_write_list - Write the value (p_entry->arg2) to address specified
 * by p_entry->arg1 for all entries in header with delay of p_hdr->delay
 * between entries.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : reset_entry header for WRITE_LIST opcode.
 *
 */
static void
qla8044_write_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	struct qla8044_entry *p_entry;
	uint32_t i;

	p_entry = (struct qla8044_entry *)((char *)p_hdr +
	    sizeof(struct qla8044_reset_entry_hdr));

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
		if (p_hdr->delay)
			udelay((uint32_t)(p_hdr->delay));
	}
}

/*
 * qla8044_read_write_list - Read from address specified by p_entry->arg1,
 * write value read to address specified by p_entry->arg2, for all entries in
 * header with delay of p_hdr->delay between entries.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
 *
 */
static void
qla8044_read_write_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	struct qla8044_entry *p_entry;
	uint32_t i;

	p_entry = (struct qla8044_entry *)((char *)p_hdr +
	    sizeof(struct qla8044_reset_entry_hdr));

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_read_write_crb_reg(vha, p_entry->arg1,
		    p_entry->arg2);
		if (p_hdr->delay)
			udelay((uint32_t)(p_hdr->delay));
	}
}

/*
 * qla8044_poll_reg - Poll the given CRB addr for duration msecs till
 * value read ANDed with test_mask is equal to test_result.
 *
 * @vha : Pointer to adapter structure
 * @addr : CRB register address
 * @duration : Poll for total of "duration" msecs
 * @test_mask : Mask value read with "test_mask"
 * @test_result : Compare (value & test_mask) with test_result.
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */
static int
qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
	int duration, uint32_t test_mask, uint32_t test_result)
{
	uint32_t value = 0;
	int timeout_error;
	uint8_t retries;
	int ret_val = QLA_SUCCESS;

	ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
	if (ret_val == QLA_FUNCTION_FAILED) {
		timeout_error = 1;
		goto exit_poll_reg;
	}

	/* poll every 1/10 of the total duration */
	retries = duration / 10;

	do {
		if ((value & test_mask) != test_result) {
			timeout_error = 1;
			msleep(duration / 10);
			ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
			if (ret_val == QLA_FUNCTION_FAILED) {
				timeout_error = 1;
				goto exit_poll_reg;
			}
		} else {
			timeout_error = 0;
			break;
		}
	} while (retries--);

exit_poll_reg:
	if (timeout_error) {
		vha->reset_tmplt.seq_error++;
		ql_log(ql_log_fatal, vha, 0xb090,
		    "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
		    __func__, value, test_mask, test_result);
	}

	return timeout_error;
}

/*
 * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
 * register specified by p_entry->arg1 and compare (value AND test_mask) with
 * test_result to validate it. Wait for p_hdr->delay between processing
 * entries.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : reset_entry header for POLL_LIST opcode.
 *
 */
static void
qla8044_poll_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla8044_entry *p_entry;
	struct qla8044_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla8044_poll *)
		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));

	/* Entries start after 8 byte qla8044_poll, poll header contains
	 * the test_mask, test_value.
	 */
	p_entry = (struct qla8044_entry *)((char *)p_poll +
	    sizeof(struct qla8044_poll));

	delay = (long)p_hdr->delay;

	if (!delay) {
		for (i = 0; i < p_hdr->count; i++, p_entry++)
			qla8044_poll_reg(vha, p_entry->arg1,
			    delay, p_poll->test_mask, p_poll->test_value);
	} else {
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			if (delay) {
				if (qla8044_poll_reg(vha,
				    p_entry->arg1, delay,
				    p_poll->test_mask,
				    p_poll->test_value)) {
					/* If
					 * (data_read & test_mask != test_value)
					 * read TIMEOUT_ADDR (arg1) and
					 * ADDR (arg2) registers
					 */
					qla8044_rd_reg_indirect(vha,
					    p_entry->arg1, &value);
					qla8044_rd_reg_indirect(vha,
					    p_entry->arg2, &value);
				}
			}
		}
	}
}

/*
 * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr,
 * read ar_addr, if (value & test_mask != test_mask) re-read till timeout
 * expires.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
 *
 */
static void
qla8044_poll_write_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla8044_quad_entry *p_entry;
	struct qla8044_poll *p_poll;
	uint32_t i;

	p_poll = (struct qla8044_poll *)((char *)p_hdr +
	    sizeof(struct qla8044_reset_entry_hdr));

	p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
	    sizeof(struct qla8044_poll));

	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_wr_reg_indirect(vha,
		    p_entry->dr_addr, p_entry->dr_value);
		qla8044_wr_reg_indirect(vha,
		    p_entry->ar_addr, p_entry->ar_value);
		if (delay) {
			if (qla8044_poll_reg(vha,
			    p_entry->ar_addr, delay,
			    p_poll->test_mask,
			    p_poll->test_value)) {
				ql_dbg(ql_dbg_p3p, vha, 0xb091,
				    "%s: Timeout Error: poll list, ",
				    __func__);
				ql_dbg(ql_dbg_p3p, vha, 0xb092,
				    "item_num %d, entry_num %d\n", i,
				    vha->reset_tmplt.seq_index);
			}
		}
	}
}

/*
 * qla8044_read_modify_write - Read value from p_entry->arg1, modify the
 * value, write value to p_entry->arg2. Process entries with p_hdr->delay
 * between entries.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : header with shift/or/xor values.
 *
 */
static void
qla8044_read_modify_write(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	struct qla8044_entry *p_entry;
	struct qla8044_rmw *p_rmw_hdr;
	uint32_t i;

	p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
	    sizeof(struct qla8044_reset_entry_hdr));

	p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
	    sizeof(struct qla8044_rmw));

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_rmw_crb_reg(vha, p_entry->arg1,
		    p_entry->arg2, p_rmw_hdr);
		if (p_hdr->delay)
			udelay((uint32_t)(p_hdr->delay));
	}
}

/*
 * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
 * two entries of a sequence.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : Common reset entry header.
 *
 */
static
void qla8044_pause(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	if (p_hdr->delay)
		mdelay((uint32_t)((long)p_hdr->delay));
}

/*
 * qla8044_template_end - Indicates end of reset sequence processing.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : Common reset entry header.
 *
 */
static void
qla8044_template_end(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	vha->reset_tmplt.template_end = 1;

	if (vha->reset_tmplt.seq_error == 0) {
		ql_dbg(ql_dbg_p3p, vha, 0xb093,
		    "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
	} else {
		ql_log(ql_log_fatal, vha, 0xb094,
		    "%s: Reset sequence completed with some timeout "
		    "errors.\n", __func__);
	}
}

/*
 * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr,
 * if (value & test_mask != test_value) re-read till timeout value expires,
 * read dr_addr register and assign to reset_tmplt.array.
 *
 * @vha : Pointer to adapter structure
 * @p_hdr : Common reset entry header.
 *
 */
static void
qla8044_poll_read_list(struct scsi_qla_host *vha,
	struct qla8044_reset_entry_hdr *p_hdr)
{
	long delay;
	int index;
	struct qla8044_quad_entry *p_entry;
	struct qla8044_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla8044_poll *)
		((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));

	p_entry = (struct qla8044_quad_entry *)
		((char *)p_poll + sizeof(struct qla8044_poll));

	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
		    p_entry->ar_value);
		if (delay) {
			if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
			    p_poll->test_mask, p_poll->test_value)) {
				ql_dbg(ql_dbg_p3p, vha, 0xb095,
				    "%s: Timeout Error: poll "
				    "list, ", __func__);
				ql_dbg(ql_dbg_p3p, vha, 0xb096,
				    "Item_num %d, "
				    "entry_num %d\n", i,
				    vha->reset_tmplt.seq_index);
			} else {
				index = vha->reset_tmplt.array_index;
				qla8044_rd_reg_indirect(vha,
				    p_entry->dr_addr, &value);
				vha->reset_tmplt.array[index++] = value;
				if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
					vha->reset_tmplt.array_index = 1;
			}
		}
	}
}

/*
 * qla8044_process_reset_template - Process all entries in reset template
 * till entry with SEQ_END opcode, which indicates end of the reset template
 * processing. Each entry has a Reset Entry header, entry opcode/command, with
 * size of the entry, number of entries in sub-sequence and delay in microsecs
 * or timeout in millisecs.
 *
 * @vha : Pointer to adapter structure
 * @p_buff : Common reset entry header.
 *
 */
static void
qla8044_process_reset_template(struct scsi_qla_host *vha,
	char *p_buff)
{
	int index, entries;
	struct qla8044_reset_entry_hdr *p_hdr;
	char *p_entry = p_buff;

	vha->reset_tmplt.seq_end = 0;
	vha->reset_tmplt.template_end = 0;
	entries = vha->reset_tmplt.hdr->entries;
	index = vha->reset_tmplt.seq_index;

	for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
		p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
		switch (p_hdr->cmd) {
		case OPCODE_NOP:
			break;
		case OPCODE_WRITE_LIST:
			qla8044_write_list(vha, p_hdr);
			break;
		case OPCODE_READ_WRITE_LIST:
			qla8044_read_write_list(vha, p_hdr);
			break;
		case OPCODE_POLL_LIST:
			qla8044_poll_list(vha, p_hdr);
			break;
		case OPCODE_POLL_WRITE_LIST:
			qla8044_poll_write_list(vha, p_hdr);
			break;
		case OPCODE_READ_MODIFY_WRITE:
			qla8044_read_modify_write(vha, p_hdr);
			break;
		case OPCODE_SEQ_PAUSE:
			qla8044_pause(vha, p_hdr);
			break;
		case OPCODE_SEQ_END:
			vha->reset_tmplt.seq_end = 1;
			break;
		case OPCODE_TMPL_END:
			qla8044_template_end(vha, p_hdr);
			break;
		case OPCODE_POLL_READ_LIST:
			qla8044_poll_read_list(vha, p_hdr);
			break;
		default:
			ql_log(ql_log_fatal, vha, 0xb097,
			    "%s: Unknown command ==> 0x%04x on "
			    "entry = %d\n", __func__, p_hdr->cmd, index);
			break;
		}
		/*
		 * Set pointer to next entry in the sequence.
		 */
		p_entry += p_hdr->size;
	}
	vha->reset_tmplt.seq_index = index;
}

static void
qla8044_process_init_seq(struct scsi_qla_host *vha)
{
	qla8044_process_reset_template(vha,
	    vha->reset_tmplt.init_offset);
	if (vha->reset_tmplt.seq_end != 1)
		ql_log(ql_log_fatal, vha, 0xb098,
		    "%s: Abrupt INIT Sub-Sequence end.\n",
		    __func__);
}

static void
qla8044_process_stop_seq(struct scsi_qla_host *vha)
{
	vha->reset_tmplt.seq_index = 0;
	qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
	if (vha->reset_tmplt.seq_end != 1)
		ql_log(ql_log_fatal, vha, 0xb099,
		    "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
}

static void
qla8044_process_start_seq(struct scsi_qla_host *vha)
{
	qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
	if (vha->reset_tmplt.template_end != 1)
		ql_log(ql_log_fatal, vha, 0xb09a,
		    "%s: Abrupt START Sub-Sequence end.\n",
		    __func__);
}

static int
qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
	uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
{
	uint32_t i;
	uint32_t u32_word;
	uint32_t flash_offset;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);

	if (addr & 0x3) {
		ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
		    __func__, addr);
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_lockless_read;
	}

	ret_val = qla8044_wr_reg_indirect(vha,
	    QLA8044_FLASH_DIRECT_WINDOW, (addr));

	if (ret_val != QLA_SUCCESS) {
		ql_log(ql_log_fatal, vha, 0xb09c,
		    "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
		    __func__, addr);
		goto exit_lockless_read;
	}

	/* Check if data is spread across multiple sectors */
	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
	    (QLA8044_FLASH_SECTOR_SIZE - 1)) {
		/* Multi sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla8044_rd_reg_indirect(vha,
			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
			if (ret_val != QLA_SUCCESS) {
				ql_log(ql_log_fatal, vha, 0xb09d,
				    "%s: failed to read addr 0x%x!\n",
				    __func__, addr);
				goto exit_lockless_read;
			}
			*(uint32_t *)p_data = u32_word;
			p_data = p_data + 4;
			addr = addr + 4;
			flash_offset = flash_offset + 4;
			if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
				/* This write is needed once for each sector */
				ret_val = qla8044_wr_reg_indirect(vha,
				    QLA8044_FLASH_DIRECT_WINDOW, (addr));
				if (ret_val != QLA_SUCCESS) {
					ql_log(ql_log_fatal, vha, 0xb09f,
					    "%s: failed to write addr "
					    "0x%x to FLASH_DIRECT_WINDOW!\n",
					    __func__, addr);
					goto exit_lockless_read;
				}
				flash_offset = 0;
			}
		}
	} else {
		/* Single sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla8044_rd_reg_indirect(vha,
			    QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
			if (ret_val != QLA_SUCCESS) {
				ql_log(ql_log_fatal, vha, 0xb0a0,
				    "%s: failed to read addr 0x%x!\n",
				    __func__, addr);
				goto exit_lockless_read;
			}
			*(uint32_t *)p_data = u32_word;
			p_data = p_data + 4;
			addr = addr + 4;
		}
	}

exit_lockless_read:
	return ret_val;
}

/*
 * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory
 *
 * @vha : Pointer to adapter structure
 * @addr : Flash address to write to
 * @data : Data to be written
 * @count : word_count to be written
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */
static int
qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
	uint64_t addr, uint32_t *data, uint32_t count)
{
	int i, j, ret_val = QLA_SUCCESS;
	uint32_t agt_ctrl;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	/* Only 128-bit aligned access */
	if (addr & 0xF) {
		ret_val = QLA_FUNCTION_FAILED;
		goto exit_ms_mem_write;
	}
	write_lock_irqsave(&ha->hw_lock, flags);

	/* Write address */
	ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
	if (ret_val == QLA_FUNCTION_FAILED) {
		ql_log(ql_log_fatal, vha, 0xb0a1,
		    "%s: write to AGT_ADDR_HI failed!\n", __func__);
		goto exit_ms_mem_write_unlock;
	}

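	/* Each iteration transfers one 128-bit chunk: the low destination
	 * address is programmed, four 32-bit data words are written, and the
	 * test agent is polled until the write completes.
	 */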
1144*4882a593Smuzhiyun for (i = 0; i < count; i++, addr += 16) {
1145*4882a593Smuzhiyun if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
1146*4882a593Smuzhiyun QLA8044_ADDR_QDR_NET_MAX)) ||
1147*4882a593Smuzhiyun (addr_in_range(addr, QLA8044_ADDR_DDR_NET,
1148*4882a593Smuzhiyun QLA8044_ADDR_DDR_NET_MAX)))) {
1149*4882a593Smuzhiyun ret_val = QLA_FUNCTION_FAILED;
1150*4882a593Smuzhiyun goto exit_ms_mem_write_unlock;
1151*4882a593Smuzhiyun }
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha,
1154*4882a593Smuzhiyun MD_MIU_TEST_AGT_ADDR_LO, addr);
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun /* Write data */
1157*4882a593Smuzhiyun ret_val += qla8044_wr_reg_indirect(vha,
1158*4882a593Smuzhiyun MD_MIU_TEST_AGT_WRDATA_LO, *data++);
1159*4882a593Smuzhiyun ret_val += qla8044_wr_reg_indirect(vha,
1160*4882a593Smuzhiyun MD_MIU_TEST_AGT_WRDATA_HI, *data++);
1161*4882a593Smuzhiyun ret_val += qla8044_wr_reg_indirect(vha,
1162*4882a593Smuzhiyun MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
1163*4882a593Smuzhiyun ret_val += qla8044_wr_reg_indirect(vha,
1164*4882a593Smuzhiyun MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
1165*4882a593Smuzhiyun if (ret_val == QLA_FUNCTION_FAILED) {
1166*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0a2,
1167*4882a593Smuzhiyun "%s: write to AGT_WRDATA failed!\n",
1168*4882a593Smuzhiyun __func__);
1169*4882a593Smuzhiyun goto exit_ms_mem_write_unlock;
1170*4882a593Smuzhiyun }
1171*4882a593Smuzhiyun
1172*4882a593Smuzhiyun /* Check write status */
1173*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1174*4882a593Smuzhiyun MIU_TA_CTL_WRITE_ENABLE);
1175*4882a593Smuzhiyun ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1176*4882a593Smuzhiyun MIU_TA_CTL_WRITE_START);
1177*4882a593Smuzhiyun if (ret_val == QLA_FUNCTION_FAILED) {
1178*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0a3,
1179*4882a593Smuzhiyun "%s: write to AGT_CTRL failed!\n", __func__);
1180*4882a593Smuzhiyun goto exit_ms_mem_write_unlock;
1181*4882a593Smuzhiyun }
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun for (j = 0; j < MAX_CTL_CHECK; j++) {
1184*4882a593Smuzhiyun ret_val = qla8044_rd_reg_indirect(vha,
1185*4882a593Smuzhiyun MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
1186*4882a593Smuzhiyun if (ret_val == QLA_FUNCTION_FAILED) {
1187*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0a4,
1188*4882a593Smuzhiyun "%s: failed to read "
1189*4882a593Smuzhiyun "MD_MIU_TEST_AGT_CTRL!\n", __func__);
1190*4882a593Smuzhiyun goto exit_ms_mem_write_unlock;
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
1193*4882a593Smuzhiyun break;
1194*4882a593Smuzhiyun }
1195*4882a593Smuzhiyun
1196*4882a593Smuzhiyun /* Status check failed */
1197*4882a593Smuzhiyun if (j >= MAX_CTL_CHECK) {
1198*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0a5,
1199*4882a593Smuzhiyun "%s: MS memory write failed!\n",
1200*4882a593Smuzhiyun __func__);
1201*4882a593Smuzhiyun ret_val = QLA_FUNCTION_FAILED;
1202*4882a593Smuzhiyun goto exit_ms_mem_write_unlock;
1203*4882a593Smuzhiyun }
1204*4882a593Smuzhiyun }
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun exit_ms_mem_write_unlock:
1207*4882a593Smuzhiyun write_unlock_irqrestore(&ha->hw_lock, flags);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun exit_ms_mem_write:
1210*4882a593Smuzhiyun return ret_val;
1211*4882a593Smuzhiyun }
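
/*
 * Usage sketch (illustrative only, not taken from the hardware spec; buf,
 * dest and size are placeholder names): callers pad the source buffer to a
 * 16-byte multiple and pass the length in 128-bit words, roughly:
 *
 *	size = (size + 0xF) & ~0xF;		// pad to a 16-byte boundary
 *	rval = qla8044_ms_mem_write_128b(vha, dest,
 *		(uint32_t *)buf, size / 16);	// count is in 16-byte words
 *
 * qla8044_copy_bootloader() below follows this pattern when staging the
 * bootloader image into MS memory.
 */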
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun static int
1214*4882a593Smuzhiyun qla8044_copy_bootloader(struct scsi_qla_host *vha)
1215*4882a593Smuzhiyun {
1216*4882a593Smuzhiyun uint8_t *p_cache;
1217*4882a593Smuzhiyun uint32_t src, count, size;
1218*4882a593Smuzhiyun uint64_t dest;
1219*4882a593Smuzhiyun int ret_val = QLA_SUCCESS;
1220*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun src = QLA8044_BOOTLOADER_FLASH_ADDR;
1223*4882a593Smuzhiyun dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
1224*4882a593Smuzhiyun size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun /* Round the size up to the next 128-bit (16-byte) boundary if needed */
1227*4882a593Smuzhiyun if (size & 0xF)
1228*4882a593Smuzhiyun size = (size + 16) & ~0xF;
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun /* Number of 128-bit (16-byte) words to transfer */
1231*4882a593Smuzhiyun count = size/16;
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun p_cache = vmalloc(size);
1234*4882a593Smuzhiyun if (p_cache == NULL) {
1235*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0a6,
1236*4882a593Smuzhiyun "%s: Failed to allocate memory for "
1237*4882a593Smuzhiyun "boot loader cache\n", __func__);
1238*4882a593Smuzhiyun ret_val = QLA_FUNCTION_FAILED;
1239*4882a593Smuzhiyun goto exit_copy_bootloader;
1240*4882a593Smuzhiyun }
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun ret_val = qla8044_lockless_flash_read_u32(vha, src,
1243*4882a593Smuzhiyun p_cache, size/sizeof(uint32_t));
1244*4882a593Smuzhiyun if (ret_val == QLA_FUNCTION_FAILED) {
1245*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0a7,
1246*4882a593Smuzhiyun "%s: Error reading F/W from flash!!!\n", __func__);
1247*4882a593Smuzhiyun goto exit_copy_error;
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
1250*4882a593Smuzhiyun __func__);
1251*4882a593Smuzhiyun
1252*4882a593Smuzhiyun /* 128 bit/16 byte write to MS memory */
1253*4882a593Smuzhiyun ret_val = qla8044_ms_mem_write_128b(vha, dest,
1254*4882a593Smuzhiyun (uint32_t *)p_cache, count);
1255*4882a593Smuzhiyun if (ret_val == QLA_FUNCTION_FAILED) {
1256*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0a9,
1257*4882a593Smuzhiyun "%s: Error writing F/W to MS !!!\n", __func__);
1258*4882a593Smuzhiyun goto exit_copy_error;
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
1261*4882a593Smuzhiyun "%s: Wrote F/W (size %d) to MS !!!\n",
1262*4882a593Smuzhiyun __func__, size);
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun exit_copy_error:
1265*4882a593Smuzhiyun vfree(p_cache);
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun exit_copy_bootloader:
1268*4882a593Smuzhiyun return ret_val;
1269*4882a593Smuzhiyun }
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun static int
1272*4882a593Smuzhiyun qla8044_restart(struct scsi_qla_host *vha)
1273*4882a593Smuzhiyun {
1274*4882a593Smuzhiyun int ret_val = QLA_SUCCESS;
1275*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1276*4882a593Smuzhiyun
1277*4882a593Smuzhiyun qla8044_process_stop_seq(vha);
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun /* Collect minidump */
1280*4882a593Smuzhiyun if (ql2xmdenable)
1281*4882a593Smuzhiyun qla8044_get_minidump(vha);
1282*4882a593Smuzhiyun else
1283*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb14c,
1284*4882a593Smuzhiyun "Minidump disabled.\n");
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun qla8044_process_init_seq(vha);
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun if (qla8044_copy_bootloader(vha)) {
1289*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0ab,
1290*4882a593Smuzhiyun "%s: Copy bootloader, firmware restart failed!\n",
1291*4882a593Smuzhiyun __func__);
1292*4882a593Smuzhiyun ret_val = QLA_FUNCTION_FAILED;
1293*4882a593Smuzhiyun goto exit_restart;
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun /*
1297*4882a593Smuzhiyun * Loads F/W from flash
1298*4882a593Smuzhiyun */
1299*4882a593Smuzhiyun qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
1300*4882a593Smuzhiyun
1301*4882a593Smuzhiyun qla8044_process_start_seq(vha);
1302*4882a593Smuzhiyun
1303*4882a593Smuzhiyun exit_restart:
1304*4882a593Smuzhiyun return ret_val;
1305*4882a593Smuzhiyun }
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun /*
1308*4882a593Smuzhiyun * qla8044_check_cmd_peg_status - Check peg status to see if Peg is
1309*4882a593Smuzhiyun * initialized.
1310*4882a593Smuzhiyun *
1311*4882a593Smuzhiyun * @vha : Pointer to adapter structure
1312*4882a593Smuzhiyun *
1313*4882a593Smuzhiyun * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1314*4882a593Smuzhiyun */
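
/*
 * Note: the loop below polls QLA8044_CMDPEG_STATE up to
 * CRB_CMDPEG_CHECK_RETRY_COUNT times with an msleep(CRB_CMDPEG_CHECK_DELAY)
 * between reads, so the worst-case wait before reporting failure is roughly
 * retry-count * delay milliseconds.
 */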
1315*4882a593Smuzhiyun static int
1316*4882a593Smuzhiyun qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
1317*4882a593Smuzhiyun {
1318*4882a593Smuzhiyun uint32_t val, ret_val = QLA_FUNCTION_FAILED;
1319*4882a593Smuzhiyun int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
1320*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun do {
1323*4882a593Smuzhiyun val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
1324*4882a593Smuzhiyun if (val == PHAN_INITIALIZE_COMPLETE) {
1325*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
1326*4882a593Smuzhiyun "%s: Command Peg initialization "
1327*4882a593Smuzhiyun "complete! state=0x%x\n", __func__, val);
1328*4882a593Smuzhiyun ret_val = QLA_SUCCESS;
1329*4882a593Smuzhiyun break;
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun msleep(CRB_CMDPEG_CHECK_DELAY);
1332*4882a593Smuzhiyun } while (--retries);
1333*4882a593Smuzhiyun
1334*4882a593Smuzhiyun return ret_val;
1335*4882a593Smuzhiyun }
1336*4882a593Smuzhiyun
1337*4882a593Smuzhiyun static int
1338*4882a593Smuzhiyun qla8044_start_firmware(struct scsi_qla_host *vha)
1339*4882a593Smuzhiyun {
1340*4882a593Smuzhiyun int ret_val = QLA_SUCCESS;
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun if (qla8044_restart(vha)) {
1343*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0ad,
1344*4882a593Smuzhiyun "%s: Restart error, reset needed!\n",
1345*4882a593Smuzhiyun __func__);
1346*4882a593Smuzhiyun ret_val = QLA_FUNCTION_FAILED;
1347*4882a593Smuzhiyun goto exit_start_fw;
1348*4882a593Smuzhiyun } else
1349*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0af,
1350*4882a593Smuzhiyun "%s: Restart done!\n", __func__);
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun ret_val = qla8044_check_cmd_peg_status(vha);
1353*4882a593Smuzhiyun if (ret_val) {
1354*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0b0,
1355*4882a593Smuzhiyun "%s: Peg not initialized!\n", __func__);
1356*4882a593Smuzhiyun ret_val = QLA_FUNCTION_FAILED;
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun exit_start_fw:
1360*4882a593Smuzhiyun return ret_val;
1361*4882a593Smuzhiyun }
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun void
1364*4882a593Smuzhiyun qla8044_clear_drv_active(struct qla_hw_data *ha)
1365*4882a593Smuzhiyun {
1366*4882a593Smuzhiyun uint32_t drv_active;
1367*4882a593Smuzhiyun struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1370*4882a593Smuzhiyun drv_active &= ~(1 << (ha->portnum));
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0b1,
1373*4882a593Smuzhiyun "%s(%ld): drv_active: 0x%08x\n",
1374*4882a593Smuzhiyun __func__, vha->host_no, drv_active);
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1377*4882a593Smuzhiyun }
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun /*
1380*4882a593Smuzhiyun * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
1381*4882a593Smuzhiyun * @vha: pointer to adapter structure
1382*4882a593Smuzhiyun *
1383*4882a593Smuzhiyun * Note: IDC lock must be held upon entry
1384*4882a593Smuzhiyun **/
1385*4882a593Smuzhiyun static int
1386*4882a593Smuzhiyun qla8044_device_bootstrap(struct scsi_qla_host *vha)
1387*4882a593Smuzhiyun {
1388*4882a593Smuzhiyun int rval = QLA_FUNCTION_FAILED;
1389*4882a593Smuzhiyun int i;
1390*4882a593Smuzhiyun uint32_t old_count = 0, count = 0;
1391*4882a593Smuzhiyun int need_reset = 0;
1392*4882a593Smuzhiyun uint32_t idc_ctrl;
1393*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1394*4882a593Smuzhiyun
1395*4882a593Smuzhiyun need_reset = qla8044_need_reset(vha);
1396*4882a593Smuzhiyun
1397*4882a593Smuzhiyun if (!need_reset) {
1398*4882a593Smuzhiyun old_count = qla8044_rd_direct(vha,
1399*4882a593Smuzhiyun QLA8044_PEG_ALIVE_COUNTER_INDEX);
1400*4882a593Smuzhiyun
1401*4882a593Smuzhiyun for (i = 0; i < 10; i++) {
1402*4882a593Smuzhiyun msleep(200);
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun count = qla8044_rd_direct(vha,
1405*4882a593Smuzhiyun QLA8044_PEG_ALIVE_COUNTER_INDEX);
1406*4882a593Smuzhiyun if (count != old_count) {
1407*4882a593Smuzhiyun rval = QLA_SUCCESS;
1408*4882a593Smuzhiyun goto dev_ready;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun qla8044_flash_lock_recovery(vha);
1412*4882a593Smuzhiyun } else {
1413*4882a593Smuzhiyun /* We are trying to perform a recovery here. */
1414*4882a593Smuzhiyun if (ha->flags.isp82xx_fw_hung)
1415*4882a593Smuzhiyun qla8044_flash_lock_recovery(vha);
1416*4882a593Smuzhiyun }
1417*4882a593Smuzhiyun
1418*4882a593Smuzhiyun /* set to DEV_INITIALIZING */
1419*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0b2,
1420*4882a593Smuzhiyun "%s: HW State: INITIALIZING\n", __func__);
1421*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1422*4882a593Smuzhiyun QLA8XXX_DEV_INITIALIZING);
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun qla8044_idc_unlock(ha);
1425*4882a593Smuzhiyun rval = qla8044_start_firmware(vha);
1426*4882a593Smuzhiyun qla8044_idc_lock(ha);
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun if (rval != QLA_SUCCESS) {
1429*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0b3,
1430*4882a593Smuzhiyun "%s: HW State: FAILED\n", __func__);
1431*4882a593Smuzhiyun qla8044_clear_drv_active(ha);
1432*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1433*4882a593Smuzhiyun QLA8XXX_DEV_FAILED);
1434*4882a593Smuzhiyun return rval;
1435*4882a593Smuzhiyun }
1436*4882a593Smuzhiyun
1437*4882a593Smuzhiyun /* For ISP8044, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, clear it after
1438*4882a593Smuzhiyun * the device goes to the INIT state. */
1439*4882a593Smuzhiyun idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1440*4882a593Smuzhiyun if (idc_ctrl & GRACEFUL_RESET_BIT1) {
1441*4882a593Smuzhiyun qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
1442*4882a593Smuzhiyun (idc_ctrl & ~GRACEFUL_RESET_BIT1));
1443*4882a593Smuzhiyun ha->fw_dumped = false;
1444*4882a593Smuzhiyun }
1445*4882a593Smuzhiyun
1446*4882a593Smuzhiyun dev_ready:
1447*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0b4,
1448*4882a593Smuzhiyun "%s: HW State: READY\n", __func__);
1449*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
1450*4882a593Smuzhiyun
1451*4882a593Smuzhiyun return rval;
1452*4882a593Smuzhiyun }
1453*4882a593Smuzhiyun
1454*4882a593Smuzhiyun /*-------------------------Reset Sequence Functions-----------------------*/
1455*4882a593Smuzhiyun static void
1456*4882a593Smuzhiyun qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
1457*4882a593Smuzhiyun {
1458*4882a593Smuzhiyun u8 *phdr;
1459*4882a593Smuzhiyun
1460*4882a593Smuzhiyun if (!vha->reset_tmplt.buff) {
1461*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0b5,
1462*4882a593Smuzhiyun "%s: Error: invalid reset_seq_template\n", __func__);
1463*4882a593Smuzhiyun return;
1464*4882a593Smuzhiyun }
1465*4882a593Smuzhiyun
1466*4882a593Smuzhiyun phdr = vha->reset_tmplt.buff;
1467*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
1468*4882a593Smuzhiyun "Reset Template:\n\t0x%X 0x%X 0x%X 0x%X "
1469*4882a593Smuzhiyun "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
1470*4882a593Smuzhiyun "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
1471*4882a593Smuzhiyun *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
1472*4882a593Smuzhiyun *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
1473*4882a593Smuzhiyun *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
1474*4882a593Smuzhiyun *(phdr+13), *(phdr+14), *(phdr+15));
1475*4882a593Smuzhiyun }
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun /*
1478*4882a593Smuzhiyun * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
1479*4882a593Smuzhiyun *
1480*4882a593Smuzhiyun * @vha : Pointer to adapter structure
1481*4882a593Smuzhiyun *
1482*4882a593Smuzhiyun * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
1483*4882a593Smuzhiyun */
1484*4882a593Smuzhiyun static int
1485*4882a593Smuzhiyun qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
1486*4882a593Smuzhiyun {
1487*4882a593Smuzhiyun uint32_t sum = 0;
1488*4882a593Smuzhiyun uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
1489*4882a593Smuzhiyun int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t);
1490*4882a593Smuzhiyun
1491*4882a593Smuzhiyun while (u16_count-- > 0)
1492*4882a593Smuzhiyun sum += *buff++;
1493*4882a593Smuzhiyun
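	/*
	 * Fold any carry bits back into the low 16 bits (one's-complement
	 * style) before the accumulated sum is tested below.
	 */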
1494*4882a593Smuzhiyun while (sum >> 16)
1495*4882a593Smuzhiyun sum = (sum & 0xFFFF) + (sum >> 16);
1496*4882a593Smuzhiyun
1497*4882a593Smuzhiyun /* checksum of 0 indicates a valid template */
1498*4882a593Smuzhiyun if (~sum) {
1499*4882a593Smuzhiyun return QLA_SUCCESS;
1500*4882a593Smuzhiyun } else {
1501*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0b7,
1502*4882a593Smuzhiyun "%s: Reset seq checksum failed\n", __func__);
1503*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
1504*4882a593Smuzhiyun }
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun
1507*4882a593Smuzhiyun /*
1508*4882a593Smuzhiyun * qla8044_read_reset_template - Read the reset template from flash, validate
1509*4882a593Smuzhiyun * it, and store the stop/start/init sequence offsets in vha->reset_tmplt.
1510*4882a593Smuzhiyun *
1511*4882a593Smuzhiyun * @vha : Pointer to adapter structure
1512*4882a593Smuzhiyun */
1513*4882a593Smuzhiyun void
1514*4882a593Smuzhiyun qla8044_read_reset_template(struct scsi_qla_host *vha)
1515*4882a593Smuzhiyun {
1516*4882a593Smuzhiyun uint8_t *p_buff;
1517*4882a593Smuzhiyun uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun vha->reset_tmplt.seq_error = 0;
1520*4882a593Smuzhiyun vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
1521*4882a593Smuzhiyun if (vha->reset_tmplt.buff == NULL) {
1522*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0b8,
1523*4882a593Smuzhiyun "%s: Failed to allocate reset template resources\n",
1524*4882a593Smuzhiyun __func__);
1525*4882a593Smuzhiyun goto exit_read_reset_template;
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun p_buff = vha->reset_tmplt.buff;
1529*4882a593Smuzhiyun addr = QLA8044_RESET_TEMPLATE_ADDR;
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun tmplt_hdr_def_size =
1532*4882a593Smuzhiyun sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
1533*4882a593Smuzhiyun
1534*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
1535*4882a593Smuzhiyun "%s: Read template hdr size %d from Flash\n",
1536*4882a593Smuzhiyun __func__, tmplt_hdr_def_size);
1537*4882a593Smuzhiyun
1538*4882a593Smuzhiyun /* Copy template header from flash */
1539*4882a593Smuzhiyun if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1540*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0ba,
1541*4882a593Smuzhiyun "%s: Failed to read reset template\n", __func__);
1542*4882a593Smuzhiyun goto exit_read_template_error;
1543*4882a593Smuzhiyun }
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun vha->reset_tmplt.hdr =
1546*4882a593Smuzhiyun (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
1547*4882a593Smuzhiyun
1548*4882a593Smuzhiyun /* Validate the template header size and signature */
1549*4882a593Smuzhiyun tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
1550*4882a593Smuzhiyun if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
1551*4882a593Smuzhiyun (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
1552*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0bb,
1553*4882a593Smuzhiyun "%s: Template header size %d invalid, expected %d!\n",
1554*4882a593Smuzhiyun __func__,
1555*4882a593Smuzhiyun tmplt_hdr_size, tmplt_hdr_def_size);
1556*4882a593Smuzhiyun goto exit_read_template_error;
1557*4882a593Smuzhiyun }
1558*4882a593Smuzhiyun
1559*4882a593Smuzhiyun addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
1560*4882a593Smuzhiyun p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
1561*4882a593Smuzhiyun tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
1562*4882a593Smuzhiyun vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
1565*4882a593Smuzhiyun "%s: Read rest of the template size %d\n",
1566*4882a593Smuzhiyun __func__, vha->reset_tmplt.hdr->size);
1567*4882a593Smuzhiyun
1568*4882a593Smuzhiyun /* Copy rest of the template */
1569*4882a593Smuzhiyun if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1570*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0bd,
1571*4882a593Smuzhiyun "%s: Failed to read reset template\n", __func__);
1572*4882a593Smuzhiyun goto exit_read_template_error;
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun
1575*4882a593Smuzhiyun /* Integrity check */
1576*4882a593Smuzhiyun if (qla8044_reset_seq_checksum_test(vha)) {
1577*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0be,
1578*4882a593Smuzhiyun "%s: Reset Seq checksum failed!\n", __func__);
1579*4882a593Smuzhiyun goto exit_read_template_error;
1580*4882a593Smuzhiyun }
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
1583*4882a593Smuzhiyun "%s: Reset Seq checksum passed! Get stop, "
1584*4882a593Smuzhiyun "start and init seq offsets\n", __func__);
1585*4882a593Smuzhiyun
1586*4882a593Smuzhiyun /* Get STOP, START, INIT sequence offsets */
1587*4882a593Smuzhiyun vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
1588*4882a593Smuzhiyun vha->reset_tmplt.hdr->init_seq_offset;
1589*4882a593Smuzhiyun
1590*4882a593Smuzhiyun vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
1591*4882a593Smuzhiyun vha->reset_tmplt.hdr->start_seq_offset;
1592*4882a593Smuzhiyun
1593*4882a593Smuzhiyun vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
1594*4882a593Smuzhiyun vha->reset_tmplt.hdr->hdr_size;
1595*4882a593Smuzhiyun
1596*4882a593Smuzhiyun qla8044_dump_reset_seq_hdr(vha);
1597*4882a593Smuzhiyun
1598*4882a593Smuzhiyun goto exit_read_reset_template;
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun exit_read_template_error:
1601*4882a593Smuzhiyun vfree(vha->reset_tmplt.buff);
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun exit_read_reset_template:
1604*4882a593Smuzhiyun return;
1605*4882a593Smuzhiyun }
1606*4882a593Smuzhiyun
1607*4882a593Smuzhiyun void
1608*4882a593Smuzhiyun qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
1609*4882a593Smuzhiyun {
1610*4882a593Smuzhiyun uint32_t idc_ctrl;
1611*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1614*4882a593Smuzhiyun idc_ctrl |= DONTRESET_BIT0;
1615*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
1616*4882a593Smuzhiyun "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
1617*4882a593Smuzhiyun qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1618*4882a593Smuzhiyun }
1619*4882a593Smuzhiyun
1620*4882a593Smuzhiyun static inline void
1621*4882a593Smuzhiyun qla8044_set_rst_ready(struct scsi_qla_host *vha)
1622*4882a593Smuzhiyun {
1623*4882a593Smuzhiyun uint32_t drv_state;
1624*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1625*4882a593Smuzhiyun
1626*4882a593Smuzhiyun drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
1627*4882a593Smuzhiyun
1628*4882a593Smuzhiyun /* For ISP8044, the drv_state register has 1 bit per function;
1629*4882a593Smuzhiyun * shift 1 by the function number to set this function's bit. */
1630*4882a593Smuzhiyun drv_state |= (1 << ha->portnum);
1631*4882a593Smuzhiyun
1632*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0c1,
1633*4882a593Smuzhiyun "%s(%ld): drv_state: 0x%08x\n",
1634*4882a593Smuzhiyun __func__, vha->host_no, drv_state);
1635*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
1636*4882a593Smuzhiyun }
1637*4882a593Smuzhiyun
1638*4882a593Smuzhiyun /**
1639*4882a593Smuzhiyun * qla8044_need_reset_handler - Code to start reset sequence
1640*4882a593Smuzhiyun * @vha: pointer to adapter structure
1641*4882a593Smuzhiyun *
1642*4882a593Smuzhiyun * Note: IDC lock must be held upon entry
1643*4882a593Smuzhiyun */
1644*4882a593Smuzhiyun static void
1645*4882a593Smuzhiyun qla8044_need_reset_handler(struct scsi_qla_host *vha)
1646*4882a593Smuzhiyun {
1647*4882a593Smuzhiyun uint32_t dev_state = 0, drv_state, drv_active;
1648*4882a593Smuzhiyun unsigned long reset_timeout;
1649*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1650*4882a593Smuzhiyun
1651*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0c2,
1652*4882a593Smuzhiyun "%s: Performing ISP error recovery\n", __func__);
1653*4882a593Smuzhiyun
1654*4882a593Smuzhiyun if (vha->flags.online) {
1655*4882a593Smuzhiyun qla8044_idc_unlock(ha);
1656*4882a593Smuzhiyun qla2x00_abort_isp_cleanup(vha);
1657*4882a593Smuzhiyun ha->isp_ops->get_flash_version(vha, vha->req->ring);
1658*4882a593Smuzhiyun ha->isp_ops->nvram_config(vha);
1659*4882a593Smuzhiyun qla8044_idc_lock(ha);
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun
1662*4882a593Smuzhiyun dev_state = qla8044_rd_direct(vha,
1663*4882a593Smuzhiyun QLA8044_CRB_DEV_STATE_INDEX);
1664*4882a593Smuzhiyun drv_state = qla8044_rd_direct(vha,
1665*4882a593Smuzhiyun QLA8044_CRB_DRV_STATE_INDEX);
1666*4882a593Smuzhiyun drv_active = qla8044_rd_direct(vha,
1667*4882a593Smuzhiyun QLA8044_CRB_DRV_ACTIVE_INDEX);
1668*4882a593Smuzhiyun
1669*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0c5,
1670*4882a593Smuzhiyun "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
1671*4882a593Smuzhiyun __func__, vha->host_no, drv_state, drv_active, dev_state);
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun qla8044_set_rst_ready(vha);
1674*4882a593Smuzhiyun
1675*4882a593Smuzhiyun /* Wait up to ha->fcoe_reset_timeout seconds for reset ack from all functions */
1676*4882a593Smuzhiyun reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
1677*4882a593Smuzhiyun
1678*4882a593Smuzhiyun do {
1679*4882a593Smuzhiyun if (time_after_eq(jiffies, reset_timeout)) {
1680*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0c4,
1681*4882a593Smuzhiyun "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
1682*4882a593Smuzhiyun __func__, ha->portnum, drv_state, drv_active);
1683*4882a593Smuzhiyun break;
1684*4882a593Smuzhiyun }
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun qla8044_idc_unlock(ha);
1687*4882a593Smuzhiyun msleep(1000);
1688*4882a593Smuzhiyun qla8044_idc_lock(ha);
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun dev_state = qla8044_rd_direct(vha,
1691*4882a593Smuzhiyun QLA8044_CRB_DEV_STATE_INDEX);
1692*4882a593Smuzhiyun drv_state = qla8044_rd_direct(vha,
1693*4882a593Smuzhiyun QLA8044_CRB_DRV_STATE_INDEX);
1694*4882a593Smuzhiyun drv_active = qla8044_rd_direct(vha,
1695*4882a593Smuzhiyun QLA8044_CRB_DRV_ACTIVE_INDEX);
1696*4882a593Smuzhiyun } while (((drv_state & drv_active) != drv_active) &&
1697*4882a593Smuzhiyun (dev_state == QLA8XXX_DEV_NEED_RESET));
1698*4882a593Smuzhiyun
1699*4882a593Smuzhiyun /* Remove IDC participation of functions not acknowledging */
1700*4882a593Smuzhiyun if (drv_state != drv_active) {
1701*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0c7,
1702*4882a593Smuzhiyun "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
1703*4882a593Smuzhiyun __func__, vha->host_no, ha->portnum,
1704*4882a593Smuzhiyun (drv_active ^ drv_state));
1705*4882a593Smuzhiyun drv_active = drv_active & drv_state;
1706*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
1707*4882a593Smuzhiyun drv_active);
1708*4882a593Smuzhiyun } else {
1709*4882a593Smuzhiyun /*
1710*4882a593Smuzhiyun * Reset owner should execute reset recovery,
1711*4882a593Smuzhiyun * if all functions acknowledged
1712*4882a593Smuzhiyun */
1713*4882a593Smuzhiyun if ((ha->flags.nic_core_reset_owner) &&
1714*4882a593Smuzhiyun (dev_state == QLA8XXX_DEV_NEED_RESET)) {
1715*4882a593Smuzhiyun ha->flags.nic_core_reset_owner = 0;
1716*4882a593Smuzhiyun qla8044_device_bootstrap(vha);
1717*4882a593Smuzhiyun return;
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun }
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun /* Exit if non active function */
1722*4882a593Smuzhiyun if (!(drv_active & (1 << ha->portnum))) {
1723*4882a593Smuzhiyun ha->flags.nic_core_reset_owner = 0;
1724*4882a593Smuzhiyun return;
1725*4882a593Smuzhiyun }
1726*4882a593Smuzhiyun
1727*4882a593Smuzhiyun /*
1728*4882a593Smuzhiyun * Execute Reset Recovery if Reset Owner or Function 7
1729*4882a593Smuzhiyun * is the only active function
1730*4882a593Smuzhiyun */
1731*4882a593Smuzhiyun if (ha->flags.nic_core_reset_owner ||
1732*4882a593Smuzhiyun ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
1733*4882a593Smuzhiyun ha->flags.nic_core_reset_owner = 0;
1734*4882a593Smuzhiyun qla8044_device_bootstrap(vha);
1735*4882a593Smuzhiyun }
1736*4882a593Smuzhiyun }
1737*4882a593Smuzhiyun
1738*4882a593Smuzhiyun static void
1739*4882a593Smuzhiyun qla8044_set_drv_active(struct scsi_qla_host *vha)
1740*4882a593Smuzhiyun {
1741*4882a593Smuzhiyun uint32_t drv_active;
1742*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1743*4882a593Smuzhiyun
1744*4882a593Smuzhiyun drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1745*4882a593Smuzhiyun
1746*4882a593Smuzhiyun /* For ISP8044, drv_active register has 1 bit per function,
1747*4882a593Smuzhiyun * shift 1 by func_num to set a bit for the function.*/
1748*4882a593Smuzhiyun drv_active |= (1 << ha->portnum);
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0c8,
1751*4882a593Smuzhiyun "%s(%ld): drv_active: 0x%08x\n",
1752*4882a593Smuzhiyun __func__, vha->host_no, drv_active);
1753*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1754*4882a593Smuzhiyun }
1755*4882a593Smuzhiyun
1756*4882a593Smuzhiyun static int
1757*4882a593Smuzhiyun qla8044_check_drv_active(struct scsi_qla_host *vha)
1758*4882a593Smuzhiyun {
1759*4882a593Smuzhiyun uint32_t drv_active;
1760*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1763*4882a593Smuzhiyun if (drv_active & (1 << ha->portnum))
1764*4882a593Smuzhiyun return QLA_SUCCESS;
1765*4882a593Smuzhiyun else
1766*4882a593Smuzhiyun return QLA_TEST_FAILED;
1767*4882a593Smuzhiyun }
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyun static void
1770*4882a593Smuzhiyun qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
1771*4882a593Smuzhiyun {
1772*4882a593Smuzhiyun uint32_t idc_ctrl;
1773*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1774*4882a593Smuzhiyun
1775*4882a593Smuzhiyun idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1776*4882a593Smuzhiyun idc_ctrl &= ~DONTRESET_BIT0;
1777*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0c9,
1778*4882a593Smuzhiyun "%s: idc_ctrl = %d\n", __func__,
1779*4882a593Smuzhiyun idc_ctrl);
1780*4882a593Smuzhiyun qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1781*4882a593Smuzhiyun }
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun static int
1784*4882a593Smuzhiyun qla8044_set_idc_ver(struct scsi_qla_host *vha)
1785*4882a593Smuzhiyun {
1786*4882a593Smuzhiyun int idc_ver;
1787*4882a593Smuzhiyun uint32_t drv_active;
1788*4882a593Smuzhiyun int rval = QLA_SUCCESS;
1789*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1790*4882a593Smuzhiyun
1791*4882a593Smuzhiyun drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1792*4882a593Smuzhiyun if (drv_active == (1 << ha->portnum)) {
1793*4882a593Smuzhiyun idc_ver = qla8044_rd_direct(vha,
1794*4882a593Smuzhiyun QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1795*4882a593Smuzhiyun idc_ver &= (~0xFF);
1796*4882a593Smuzhiyun idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
1797*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
1798*4882a593Smuzhiyun idc_ver);
1799*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0ca,
1800*4882a593Smuzhiyun "%s: IDC version updated to %d\n",
1801*4882a593Smuzhiyun __func__, idc_ver);
1802*4882a593Smuzhiyun } else {
1803*4882a593Smuzhiyun idc_ver = qla8044_rd_direct(vha,
1804*4882a593Smuzhiyun QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1805*4882a593Smuzhiyun idc_ver &= 0xFF;
1806*4882a593Smuzhiyun if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
1807*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0cb,
1808*4882a593Smuzhiyun "%s: qla2xxx driver IDC version %d "
1809*4882a593Smuzhiyun "is not compatible with IDC version %d "
1810*4882a593Smuzhiyun "of other drivers!\n",
1811*4882a593Smuzhiyun __func__, QLA8044_IDC_VER_MAJ_VALUE,
1812*4882a593Smuzhiyun idc_ver);
1813*4882a593Smuzhiyun rval = QLA_FUNCTION_FAILED;
1814*4882a593Smuzhiyun goto exit_set_idc_ver;
1815*4882a593Smuzhiyun }
1816*4882a593Smuzhiyun }
1817*4882a593Smuzhiyun
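	/*
	 * Layout assumed from the shifts below (not quoted from a spec): the
	 * IDC minor version register packs a 2-bit value per function, so
	 * function N owns bits [2N+1:2N]; e.g. port 2 rewrites bits 5:4.
	 */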
1818*4882a593Smuzhiyun /* Update IDC_MINOR_VERSION */
1819*4882a593Smuzhiyun idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
1820*4882a593Smuzhiyun idc_ver &= ~(0x03 << (ha->portnum * 2));
1821*4882a593Smuzhiyun idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
1822*4882a593Smuzhiyun qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
1823*4882a593Smuzhiyun
1824*4882a593Smuzhiyun exit_set_idc_ver:
1825*4882a593Smuzhiyun return rval;
1826*4882a593Smuzhiyun }
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun static int
1829*4882a593Smuzhiyun qla8044_update_idc_reg(struct scsi_qla_host *vha)
1830*4882a593Smuzhiyun {
1831*4882a593Smuzhiyun uint32_t drv_active;
1832*4882a593Smuzhiyun int rval = QLA_SUCCESS;
1833*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun if (vha->flags.init_done)
1836*4882a593Smuzhiyun goto exit_update_idc_reg;
1837*4882a593Smuzhiyun
1838*4882a593Smuzhiyun qla8044_idc_lock(ha);
1839*4882a593Smuzhiyun qla8044_set_drv_active(vha);
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun drv_active = qla8044_rd_direct(vha,
1842*4882a593Smuzhiyun QLA8044_CRB_DRV_ACTIVE_INDEX);
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun /* If we are the first driver to load and
1845*4882a593Smuzhiyun * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
1846*4882a593Smuzhiyun if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
1847*4882a593Smuzhiyun qla8044_clear_idc_dontreset(vha);
1848*4882a593Smuzhiyun
1849*4882a593Smuzhiyun rval = qla8044_set_idc_ver(vha);
1850*4882a593Smuzhiyun if (rval == QLA_FUNCTION_FAILED)
1851*4882a593Smuzhiyun qla8044_clear_drv_active(ha);
1852*4882a593Smuzhiyun qla8044_idc_unlock(ha);
1853*4882a593Smuzhiyun
1854*4882a593Smuzhiyun exit_update_idc_reg:
1855*4882a593Smuzhiyun return rval;
1856*4882a593Smuzhiyun }
1857*4882a593Smuzhiyun
1858*4882a593Smuzhiyun /**
1859*4882a593Smuzhiyun * qla8044_need_qsnt_handler - Code to start qsnt
1860*4882a593Smuzhiyun * @vha: pointer to adapter structure
1861*4882a593Smuzhiyun */
1862*4882a593Smuzhiyun static void
1863*4882a593Smuzhiyun qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
1864*4882a593Smuzhiyun {
1865*4882a593Smuzhiyun unsigned long qsnt_timeout;
1866*4882a593Smuzhiyun uint32_t drv_state, drv_active, dev_state;
1867*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1868*4882a593Smuzhiyun
1869*4882a593Smuzhiyun if (vha->flags.online)
1870*4882a593Smuzhiyun qla2x00_quiesce_io(vha);
1871*4882a593Smuzhiyun else
1872*4882a593Smuzhiyun return;
1873*4882a593Smuzhiyun
1874*4882a593Smuzhiyun qla8044_set_qsnt_ready(vha);
1875*4882a593Smuzhiyun
1876*4882a593Smuzhiyun /* Wait for 30 secs for all functions to ack qsnt mode */
1877*4882a593Smuzhiyun qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
1878*4882a593Smuzhiyun drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
1879*4882a593Smuzhiyun drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1880*4882a593Smuzhiyun
1881*4882a593Smuzhiyun /* Shift drv_active left by 1 to line up with drv_state: the quiescent
1882*4882a593Smuzhiyun * ready bit is at bit 1 while the drv_active bit is at bit 0. */
1883*4882a593Smuzhiyun drv_active = drv_active << 1;
1884*4882a593Smuzhiyun
1885*4882a593Smuzhiyun while (drv_state != drv_active) {
1886*4882a593Smuzhiyun if (time_after_eq(jiffies, qsnt_timeout)) {
1887*4882a593Smuzhiyun /* Other functions did not ack, changing state to
1888*4882a593Smuzhiyun * DEV_READY
1889*4882a593Smuzhiyun */
1890*4882a593Smuzhiyun clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1891*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1892*4882a593Smuzhiyun QLA8XXX_DEV_READY);
1893*4882a593Smuzhiyun qla8044_clear_qsnt_ready(vha);
1894*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0cc,
1895*4882a593Smuzhiyun "Timeout waiting for quiescent ack!!!\n");
1896*4882a593Smuzhiyun return;
1897*4882a593Smuzhiyun }
1898*4882a593Smuzhiyun qla8044_idc_unlock(ha);
1899*4882a593Smuzhiyun msleep(1000);
1900*4882a593Smuzhiyun qla8044_idc_lock(ha);
1901*4882a593Smuzhiyun
1902*4882a593Smuzhiyun drv_state = qla8044_rd_direct(vha,
1903*4882a593Smuzhiyun QLA8044_CRB_DRV_STATE_INDEX);
1904*4882a593Smuzhiyun drv_active = qla8044_rd_direct(vha,
1905*4882a593Smuzhiyun QLA8044_CRB_DRV_ACTIVE_INDEX);
1906*4882a593Smuzhiyun drv_active = drv_active << 1;
1907*4882a593Smuzhiyun }
1908*4882a593Smuzhiyun
1909*4882a593Smuzhiyun /* All functions have Acked. Set quiescent state */
1910*4882a593Smuzhiyun dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1911*4882a593Smuzhiyun
1912*4882a593Smuzhiyun if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
1913*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1914*4882a593Smuzhiyun QLA8XXX_DEV_QUIESCENT);
1915*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0cd,
1916*4882a593Smuzhiyun "%s: HW State: QUIESCENT\n", __func__);
1917*4882a593Smuzhiyun }
1918*4882a593Smuzhiyun }
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun /*
1921*4882a593Smuzhiyun * qla8044_device_state_handler - Adapter state machine
1922*4882a593Smuzhiyun * @vha: pointer to host adapter structure.
1923*4882a593Smuzhiyun *
1924*4882a593Smuzhiyun * Note: IDC lock must be UNLOCKED upon entry
1925*4882a593Smuzhiyun **/
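
/*
 * Summary of the state handling in the loop below (see the switch statement):
 *   READY          -> clear reset ownership and exit
 *   COLD           -> qla8044_device_bootstrap()
 *   INITIALIZING   -> sleep 1s and re-read the state
 *   NEED_RESET     -> qla8044_need_reset_handler()
 *   NEED_QUIESCENT -> qla8044_need_qsnt_handler(), restart the init timeout
 *   QUIESCENT      -> sleep 1s, restart the init timeout
 *   FAILED/default -> qla8xxx_dev_failed_handler(), return failure
 */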
1926*4882a593Smuzhiyun int
1927*4882a593Smuzhiyun qla8044_device_state_handler(struct scsi_qla_host *vha)
1928*4882a593Smuzhiyun {
1929*4882a593Smuzhiyun uint32_t dev_state;
1930*4882a593Smuzhiyun int rval = QLA_SUCCESS;
1931*4882a593Smuzhiyun unsigned long dev_init_timeout;
1932*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
1933*4882a593Smuzhiyun
1934*4882a593Smuzhiyun rval = qla8044_update_idc_reg(vha);
1935*4882a593Smuzhiyun if (rval == QLA_FUNCTION_FAILED)
1936*4882a593Smuzhiyun goto exit_error;
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1939*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
1940*4882a593Smuzhiyun "Device state is 0x%x = %s\n",
1941*4882a593Smuzhiyun dev_state, dev_state < MAX_STATES ?
1942*4882a593Smuzhiyun qdev_state(dev_state) : "Unknown");
1943*4882a593Smuzhiyun
1944*4882a593Smuzhiyun /* Wait up to ha->fcoe_dev_init_timeout seconds for the device to go ready */
1945*4882a593Smuzhiyun dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
1946*4882a593Smuzhiyun
1947*4882a593Smuzhiyun qla8044_idc_lock(ha);
1948*4882a593Smuzhiyun
1949*4882a593Smuzhiyun while (1) {
1950*4882a593Smuzhiyun if (time_after_eq(jiffies, dev_init_timeout)) {
1951*4882a593Smuzhiyun if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
1952*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb0cf,
1953*4882a593Smuzhiyun "%s: Device Init Failed 0x%x = %s\n",
1954*4882a593Smuzhiyun QLA2XXX_DRIVER_NAME, dev_state,
1955*4882a593Smuzhiyun dev_state < MAX_STATES ?
1956*4882a593Smuzhiyun qdev_state(dev_state) : "Unknown");
1957*4882a593Smuzhiyun qla8044_wr_direct(vha,
1958*4882a593Smuzhiyun QLA8044_CRB_DEV_STATE_INDEX,
1959*4882a593Smuzhiyun QLA8XXX_DEV_FAILED);
1960*4882a593Smuzhiyun }
1961*4882a593Smuzhiyun }
1962*4882a593Smuzhiyun
1963*4882a593Smuzhiyun dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1964*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0d0,
1965*4882a593Smuzhiyun "Device state is 0x%x = %s\n",
1966*4882a593Smuzhiyun dev_state, dev_state < MAX_STATES ?
1967*4882a593Smuzhiyun qdev_state(dev_state) : "Unknown");
1968*4882a593Smuzhiyun
1969*4882a593Smuzhiyun /* NOTE: Make sure idc unlocked upon exit of switch statement */
1970*4882a593Smuzhiyun switch (dev_state) {
1971*4882a593Smuzhiyun case QLA8XXX_DEV_READY:
1972*4882a593Smuzhiyun ha->flags.nic_core_reset_owner = 0;
1973*4882a593Smuzhiyun goto exit;
1974*4882a593Smuzhiyun case QLA8XXX_DEV_COLD:
1975*4882a593Smuzhiyun rval = qla8044_device_bootstrap(vha);
1976*4882a593Smuzhiyun break;
1977*4882a593Smuzhiyun case QLA8XXX_DEV_INITIALIZING:
1978*4882a593Smuzhiyun qla8044_idc_unlock(ha);
1979*4882a593Smuzhiyun msleep(1000);
1980*4882a593Smuzhiyun qla8044_idc_lock(ha);
1981*4882a593Smuzhiyun break;
1982*4882a593Smuzhiyun case QLA8XXX_DEV_NEED_RESET:
1983*4882a593Smuzhiyun /* For ISP8044, if NEED_RESET is set by any driver,
1984*4882a593Smuzhiyun * it should be honored, irrespective of IDC_CTRL
1985*4882a593Smuzhiyun * DONTRESET_BIT0 */
1986*4882a593Smuzhiyun qla8044_need_reset_handler(vha);
1987*4882a593Smuzhiyun break;
1988*4882a593Smuzhiyun case QLA8XXX_DEV_NEED_QUIESCENT:
1989*4882a593Smuzhiyun /* idc locked/unlocked in handler */
1990*4882a593Smuzhiyun qla8044_need_qsnt_handler(vha);
1991*4882a593Smuzhiyun
1992*4882a593Smuzhiyun /* Reset the init timeout after qsnt handler */
1993*4882a593Smuzhiyun dev_init_timeout = jiffies +
1994*4882a593Smuzhiyun (ha->fcoe_reset_timeout * HZ);
1995*4882a593Smuzhiyun break;
1996*4882a593Smuzhiyun case QLA8XXX_DEV_QUIESCENT:
1997*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0d1,
1998*4882a593Smuzhiyun "HW State: QUIESCENT\n");
1999*4882a593Smuzhiyun
2000*4882a593Smuzhiyun qla8044_idc_unlock(ha);
2001*4882a593Smuzhiyun msleep(1000);
2002*4882a593Smuzhiyun qla8044_idc_lock(ha);
2003*4882a593Smuzhiyun
2004*4882a593Smuzhiyun /* Reset the init timeout after qsnt handler */
2005*4882a593Smuzhiyun dev_init_timeout = jiffies +
2006*4882a593Smuzhiyun (ha->fcoe_reset_timeout * HZ);
2007*4882a593Smuzhiyun break;
2008*4882a593Smuzhiyun case QLA8XXX_DEV_FAILED:
2009*4882a593Smuzhiyun ha->flags.nic_core_reset_owner = 0;
2010*4882a593Smuzhiyun qla8044_idc_unlock(ha);
2011*4882a593Smuzhiyun qla8xxx_dev_failed_handler(vha);
2012*4882a593Smuzhiyun rval = QLA_FUNCTION_FAILED;
2013*4882a593Smuzhiyun qla8044_idc_lock(ha);
2014*4882a593Smuzhiyun goto exit;
2015*4882a593Smuzhiyun default:
2016*4882a593Smuzhiyun qla8044_idc_unlock(ha);
2017*4882a593Smuzhiyun qla8xxx_dev_failed_handler(vha);
2018*4882a593Smuzhiyun rval = QLA_FUNCTION_FAILED;
2019*4882a593Smuzhiyun qla8044_idc_lock(ha);
2020*4882a593Smuzhiyun goto exit;
2021*4882a593Smuzhiyun }
2022*4882a593Smuzhiyun }
2023*4882a593Smuzhiyun exit:
2024*4882a593Smuzhiyun qla8044_idc_unlock(ha);
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun exit_error:
2027*4882a593Smuzhiyun return rval;
2028*4882a593Smuzhiyun }
2029*4882a593Smuzhiyun
2030*4882a593Smuzhiyun /**
2031*4882a593Smuzhiyun * qla8044_check_temp - Check the ISP8044 temperature.
2032*4882a593Smuzhiyun * @vha: adapter block pointer.
2033*4882a593Smuzhiyun *
2034*4882a593Smuzhiyun * Note: The caller should not hold the idc lock.
2035*4882a593Smuzhiyun */
2036*4882a593Smuzhiyun static int
2037*4882a593Smuzhiyun qla8044_check_temp(struct scsi_qla_host *vha)
2038*4882a593Smuzhiyun {
2039*4882a593Smuzhiyun uint32_t temp, temp_state, temp_val;
2040*4882a593Smuzhiyun int status = QLA_SUCCESS;
2041*4882a593Smuzhiyun
2042*4882a593Smuzhiyun temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
2043*4882a593Smuzhiyun temp_state = qla82xx_get_temp_state(temp);
2044*4882a593Smuzhiyun temp_val = qla82xx_get_temp_val(temp);
2045*4882a593Smuzhiyun
2046*4882a593Smuzhiyun if (temp_state == QLA82XX_TEMP_PANIC) {
2047*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb0d2,
2048*4882a593Smuzhiyun "Device temperature %d degrees C"
2049*4882a593Smuzhiyun " exceeds maximum allowed. Hardware has been shut"
2050*4882a593Smuzhiyun " down\n", temp_val);
2051*4882a593Smuzhiyun status = QLA_FUNCTION_FAILED;
2052*4882a593Smuzhiyun return status;
2053*4882a593Smuzhiyun } else if (temp_state == QLA82XX_TEMP_WARN) {
2054*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb0d3,
2055*4882a593Smuzhiyun "Device temperature %d"
2056*4882a593Smuzhiyun " degrees C exceeds operating range."
2057*4882a593Smuzhiyun " Immediate action needed.\n", temp_val);
2058*4882a593Smuzhiyun }
2059*4882a593Smuzhiyun return 0;
2060*4882a593Smuzhiyun }
2061*4882a593Smuzhiyun
2062*4882a593Smuzhiyun int qla8044_read_temperature(scsi_qla_host_t *vha)
2063*4882a593Smuzhiyun {
2064*4882a593Smuzhiyun uint32_t temp;
2065*4882a593Smuzhiyun
2066*4882a593Smuzhiyun temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
2067*4882a593Smuzhiyun return qla82xx_get_temp_val(temp);
2068*4882a593Smuzhiyun }
2069*4882a593Smuzhiyun
2070*4882a593Smuzhiyun /**
2071*4882a593Smuzhiyun * qla8044_check_fw_alive - Check firmware health
2072*4882a593Smuzhiyun * @vha: Pointer to host adapter structure.
2073*4882a593Smuzhiyun *
2074*4882a593Smuzhiyun * Context: Interrupt
2075*4882a593Smuzhiyun */
2076*4882a593Smuzhiyun int
2077*4882a593Smuzhiyun qla8044_check_fw_alive(struct scsi_qla_host *vha)
2078*4882a593Smuzhiyun {
2079*4882a593Smuzhiyun uint32_t fw_heartbeat_counter;
2080*4882a593Smuzhiyun uint32_t halt_status1, halt_status2;
2081*4882a593Smuzhiyun int status = QLA_SUCCESS;
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun fw_heartbeat_counter = qla8044_rd_direct(vha,
2084*4882a593Smuzhiyun QLA8044_PEG_ALIVE_COUNTER_INDEX);
2085*4882a593Smuzhiyun
2086*4882a593Smuzhiyun /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2087*4882a593Smuzhiyun if (fw_heartbeat_counter == 0xffffffff) {
2088*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
2089*4882a593Smuzhiyun "scsi%ld: %s: Device in frozen "
2090*4882a593Smuzhiyun "state, PEG_ALIVE_COUNTER is 0xffffffff\n",
2091*4882a593Smuzhiyun vha->host_no, __func__);
2092*4882a593Smuzhiyun return status;
2093*4882a593Smuzhiyun }
2094*4882a593Smuzhiyun
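	/*
	 * The watchdog samples this counter periodically (roughly once per
	 * second, per the "2 seconds" check below); two consecutive identical
	 * samples are treated as a hung firmware and the PEG halt status
	 * registers are dumped.
	 */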
2095*4882a593Smuzhiyun if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
2096*4882a593Smuzhiyun vha->seconds_since_last_heartbeat++;
2097*4882a593Smuzhiyun /* FW not alive after 2 seconds */
2098*4882a593Smuzhiyun if (vha->seconds_since_last_heartbeat == 2) {
2099*4882a593Smuzhiyun vha->seconds_since_last_heartbeat = 0;
2100*4882a593Smuzhiyun halt_status1 = qla8044_rd_direct(vha,
2101*4882a593Smuzhiyun QLA8044_PEG_HALT_STATUS1_INDEX);
2102*4882a593Smuzhiyun halt_status2 = qla8044_rd_direct(vha,
2103*4882a593Smuzhiyun QLA8044_PEG_HALT_STATUS2_INDEX);
2104*4882a593Smuzhiyun
2105*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0d5,
2106*4882a593Smuzhiyun "scsi(%ld): %s, ISP8044 "
2107*4882a593Smuzhiyun "Dumping hw/fw registers:\n"
2108*4882a593Smuzhiyun " PEG_HALT_STATUS1: 0x%x, "
2109*4882a593Smuzhiyun "PEG_HALT_STATUS2: 0x%x,\n",
2110*4882a593Smuzhiyun vha->host_no, __func__, halt_status1,
2111*4882a593Smuzhiyun halt_status2);
2112*4882a593Smuzhiyun status = QLA_FUNCTION_FAILED;
2113*4882a593Smuzhiyun }
2114*4882a593Smuzhiyun } else
2115*4882a593Smuzhiyun vha->seconds_since_last_heartbeat = 0;
2116*4882a593Smuzhiyun
2117*4882a593Smuzhiyun vha->fw_heartbeat_counter = fw_heartbeat_counter;
2118*4882a593Smuzhiyun return status;
2119*4882a593Smuzhiyun }
2120*4882a593Smuzhiyun
2121*4882a593Smuzhiyun void
2122*4882a593Smuzhiyun qla8044_watchdog(struct scsi_qla_host *vha)
2123*4882a593Smuzhiyun {
2124*4882a593Smuzhiyun uint32_t dev_state, halt_status;
2125*4882a593Smuzhiyun int halt_status_unrecoverable = 0;
2126*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
2127*4882a593Smuzhiyun
2128*4882a593Smuzhiyun /* Don't poll if a reset is in progress or the firmware hung in the quiescent state */
2129*4882a593Smuzhiyun if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2130*4882a593Smuzhiyun test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
2131*4882a593Smuzhiyun dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
2132*4882a593Smuzhiyun
2133*4882a593Smuzhiyun if (qla8044_check_fw_alive(vha)) {
2134*4882a593Smuzhiyun ha->flags.isp82xx_fw_hung = 1;
2135*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb10a,
2136*4882a593Smuzhiyun "Firmware hung.\n");
2137*4882a593Smuzhiyun qla82xx_clear_pending_mbx(vha);
2138*4882a593Smuzhiyun }
2139*4882a593Smuzhiyun
2140*4882a593Smuzhiyun if (qla8044_check_temp(vha)) {
2141*4882a593Smuzhiyun set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
2142*4882a593Smuzhiyun ha->flags.isp82xx_fw_hung = 1;
2143*4882a593Smuzhiyun qla2xxx_wake_dpc(vha);
2144*4882a593Smuzhiyun } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2145*4882a593Smuzhiyun !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
2146*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0d6,
2147*4882a593Smuzhiyun "%s: HW State: NEED RESET!\n",
2148*4882a593Smuzhiyun __func__);
2149*4882a593Smuzhiyun set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2150*4882a593Smuzhiyun qla2xxx_wake_dpc(vha);
2151*4882a593Smuzhiyun } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
2152*4882a593Smuzhiyun !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
2153*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0d7,
2154*4882a593Smuzhiyun "%s: HW State: NEED QUIES detected!\n",
2155*4882a593Smuzhiyun __func__);
2156*4882a593Smuzhiyun set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
2157*4882a593Smuzhiyun qla2xxx_wake_dpc(vha);
2158*4882a593Smuzhiyun } else {
2159*4882a593Smuzhiyun /* Check firmware health */
2160*4882a593Smuzhiyun if (ha->flags.isp82xx_fw_hung) {
2161*4882a593Smuzhiyun halt_status = qla8044_rd_direct(vha,
2162*4882a593Smuzhiyun QLA8044_PEG_HALT_STATUS1_INDEX);
2163*4882a593Smuzhiyun if (halt_status &
2164*4882a593Smuzhiyun QLA8044_HALT_STATUS_FW_RESET) {
2165*4882a593Smuzhiyun ql_log(ql_log_fatal, vha,
2166*4882a593Smuzhiyun 0xb0d8, "%s: Firmware "
2167*4882a593Smuzhiyun "error detected, device "
2168*4882a593Smuzhiyun "is being reset\n",
2169*4882a593Smuzhiyun __func__);
2170*4882a593Smuzhiyun } else if (halt_status &
2171*4882a593Smuzhiyun QLA8044_HALT_STATUS_UNRECOVERABLE) {
2172*4882a593Smuzhiyun halt_status_unrecoverable = 1;
2173*4882a593Smuzhiyun }
2174*4882a593Smuzhiyun
2175*4882a593Smuzhiyun /* Since we cannot change dev_state in interrupt
2176*4882a593Smuzhiyun * context, set appropriate DPC flag then wakeup
2177*4882a593Smuzhiyun * DPC */
2178*4882a593Smuzhiyun if (halt_status_unrecoverable) {
2179*4882a593Smuzhiyun set_bit(ISP_UNRECOVERABLE,
2180*4882a593Smuzhiyun &vha->dpc_flags);
2181*4882a593Smuzhiyun } else {
2182*4882a593Smuzhiyun if (dev_state ==
2183*4882a593Smuzhiyun QLA8XXX_DEV_QUIESCENT) {
2184*4882a593Smuzhiyun set_bit(FCOE_CTX_RESET_NEEDED,
2185*4882a593Smuzhiyun &vha->dpc_flags);
2186*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0d9,
2187*4882a593Smuzhiyun "%s: FW CONTEXT Reset "
2188*4882a593Smuzhiyun "needed!\n", __func__);
2189*4882a593Smuzhiyun } else {
2190*4882a593Smuzhiyun ql_log(ql_log_info, vha,
2191*4882a593Smuzhiyun 0xb0da, "%s: "
2192*4882a593Smuzhiyun "detect abort needed\n",
2193*4882a593Smuzhiyun __func__);
2194*4882a593Smuzhiyun set_bit(ISP_ABORT_NEEDED,
2195*4882a593Smuzhiyun &vha->dpc_flags);
2196*4882a593Smuzhiyun }
2197*4882a593Smuzhiyun }
2198*4882a593Smuzhiyun qla2xxx_wake_dpc(vha);
2199*4882a593Smuzhiyun }
2200*4882a593Smuzhiyun }
2201*4882a593Smuzhiyun
2202*4882a593Smuzhiyun }
2203*4882a593Smuzhiyun }
2204*4882a593Smuzhiyun
2205*4882a593Smuzhiyun static int
2206*4882a593Smuzhiyun qla8044_minidump_process_control(struct scsi_qla_host *vha,
2207*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr)
2208*4882a593Smuzhiyun {
2209*4882a593Smuzhiyun struct qla8044_minidump_entry_crb *crb_entry;
2210*4882a593Smuzhiyun uint32_t read_value, opcode, poll_time, addr, index;
2211*4882a593Smuzhiyun uint32_t crb_addr, rval = QLA_SUCCESS;
2212*4882a593Smuzhiyun unsigned long wtime;
2213*4882a593Smuzhiyun struct qla8044_minidump_template_hdr *tmplt_hdr;
2214*4882a593Smuzhiyun int i;
2215*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
2216*4882a593Smuzhiyun
2217*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
2218*4882a593Smuzhiyun tmplt_hdr = (struct qla8044_minidump_template_hdr *)
2219*4882a593Smuzhiyun ha->md_tmplt_hdr;
2220*4882a593Smuzhiyun crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;
2221*4882a593Smuzhiyun
2222*4882a593Smuzhiyun crb_addr = crb_entry->addr;
2223*4882a593Smuzhiyun for (i = 0; i < crb_entry->op_count; i++) {
2224*4882a593Smuzhiyun opcode = crb_entry->crb_ctrl.opcode;
2225*4882a593Smuzhiyun
2226*4882a593Smuzhiyun if (opcode & QLA82XX_DBG_OPCODE_WR) {
2227*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, crb_addr,
2228*4882a593Smuzhiyun crb_entry->value_1);
2229*4882a593Smuzhiyun opcode &= ~QLA82XX_DBG_OPCODE_WR;
2230*4882a593Smuzhiyun }
2231*4882a593Smuzhiyun
2232*4882a593Smuzhiyun if (opcode & QLA82XX_DBG_OPCODE_RW) {
2233*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2234*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2235*4882a593Smuzhiyun opcode &= ~QLA82XX_DBG_OPCODE_RW;
2236*4882a593Smuzhiyun }
2237*4882a593Smuzhiyun
2238*4882a593Smuzhiyun if (opcode & QLA82XX_DBG_OPCODE_AND) {
2239*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2240*4882a593Smuzhiyun read_value &= crb_entry->value_2;
2241*4882a593Smuzhiyun opcode &= ~QLA82XX_DBG_OPCODE_AND;
2242*4882a593Smuzhiyun if (opcode & QLA82XX_DBG_OPCODE_OR) {
2243*4882a593Smuzhiyun read_value |= crb_entry->value_3;
2244*4882a593Smuzhiyun opcode &= ~QLA82XX_DBG_OPCODE_OR;
2245*4882a593Smuzhiyun }
2246*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2247*4882a593Smuzhiyun }
2248*4882a593Smuzhiyun if (opcode & QLA82XX_DBG_OPCODE_OR) {
2249*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2250*4882a593Smuzhiyun read_value |= crb_entry->value_3;
2251*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2252*4882a593Smuzhiyun opcode &= ~QLA82XX_DBG_OPCODE_OR;
2253*4882a593Smuzhiyun }
2254*4882a593Smuzhiyun if (opcode & QLA82XX_DBG_OPCODE_POLL) {
2255*4882a593Smuzhiyun poll_time = crb_entry->crb_strd.poll_timeout;
2256*4882a593Smuzhiyun wtime = jiffies + poll_time;
2257*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2258*4882a593Smuzhiyun
2259*4882a593Smuzhiyun do {
2260*4882a593Smuzhiyun if ((read_value & crb_entry->value_2) ==
2261*4882a593Smuzhiyun crb_entry->value_1) {
2262*4882a593Smuzhiyun break;
2263*4882a593Smuzhiyun } else if (time_after_eq(jiffies, wtime)) {
2264*4882a593Smuzhiyun /* capturing dump failed */
2265*4882a593Smuzhiyun rval = QLA_FUNCTION_FAILED;
2266*4882a593Smuzhiyun break;
2267*4882a593Smuzhiyun } else {
2268*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha,
2269*4882a593Smuzhiyun crb_addr, &read_value);
2270*4882a593Smuzhiyun }
2271*4882a593Smuzhiyun } while (1);
2272*4882a593Smuzhiyun opcode &= ~QLA82XX_DBG_OPCODE_POLL;
2273*4882a593Smuzhiyun }
2274*4882a593Smuzhiyun
2275*4882a593Smuzhiyun if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
2276*4882a593Smuzhiyun if (crb_entry->crb_strd.state_index_a) {
2277*4882a593Smuzhiyun index = crb_entry->crb_strd.state_index_a;
2278*4882a593Smuzhiyun addr = tmplt_hdr->saved_state_array[index];
2279*4882a593Smuzhiyun } else {
2280*4882a593Smuzhiyun addr = crb_addr;
2281*4882a593Smuzhiyun }
2282*4882a593Smuzhiyun
2283*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr, &read_value);
2284*4882a593Smuzhiyun index = crb_entry->crb_ctrl.state_index_v;
2285*4882a593Smuzhiyun tmplt_hdr->saved_state_array[index] = read_value;
2286*4882a593Smuzhiyun opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
2287*4882a593Smuzhiyun }
2288*4882a593Smuzhiyun
2289*4882a593Smuzhiyun if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
2290*4882a593Smuzhiyun if (crb_entry->crb_strd.state_index_a) {
2291*4882a593Smuzhiyun index = crb_entry->crb_strd.state_index_a;
2292*4882a593Smuzhiyun addr = tmplt_hdr->saved_state_array[index];
2293*4882a593Smuzhiyun } else {
2294*4882a593Smuzhiyun addr = crb_addr;
2295*4882a593Smuzhiyun }
2296*4882a593Smuzhiyun
2297*4882a593Smuzhiyun if (crb_entry->crb_ctrl.state_index_v) {
2298*4882a593Smuzhiyun index = crb_entry->crb_ctrl.state_index_v;
2299*4882a593Smuzhiyun read_value =
2300*4882a593Smuzhiyun tmplt_hdr->saved_state_array[index];
2301*4882a593Smuzhiyun } else {
2302*4882a593Smuzhiyun read_value = crb_entry->value_1;
2303*4882a593Smuzhiyun }
2304*4882a593Smuzhiyun
2305*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr, read_value);
2306*4882a593Smuzhiyun opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
2307*4882a593Smuzhiyun }
2308*4882a593Smuzhiyun
2309*4882a593Smuzhiyun if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
2310*4882a593Smuzhiyun index = crb_entry->crb_ctrl.state_index_v;
2311*4882a593Smuzhiyun read_value = tmplt_hdr->saved_state_array[index];
2312*4882a593Smuzhiyun read_value <<= crb_entry->crb_ctrl.shl;
2313*4882a593Smuzhiyun read_value >>= crb_entry->crb_ctrl.shr;
2314*4882a593Smuzhiyun if (crb_entry->value_2)
2315*4882a593Smuzhiyun read_value &= crb_entry->value_2;
2316*4882a593Smuzhiyun read_value |= crb_entry->value_3;
2317*4882a593Smuzhiyun read_value += crb_entry->value_1;
2318*4882a593Smuzhiyun tmplt_hdr->saved_state_array[index] = read_value;
2319*4882a593Smuzhiyun opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
2320*4882a593Smuzhiyun }
2321*4882a593Smuzhiyun crb_addr += crb_entry->crb_strd.addr_stride;
2322*4882a593Smuzhiyun }
2323*4882a593Smuzhiyun return rval;
2324*4882a593Smuzhiyun }
2325*4882a593Smuzhiyun
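/*
 * RDCRB entry: capture a stride of CRB registers. Each of the 'op_count'
 * iterations reads the register at r_addr and appends the (address, value)
 * pair to the dump buffer, then advances r_addr by addr_stride.
 */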
2326*4882a593Smuzhiyun static void
2327*4882a593Smuzhiyun qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
2328*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2329*4882a593Smuzhiyun {
2330*4882a593Smuzhiyun uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2331*4882a593Smuzhiyun struct qla8044_minidump_entry_crb *crb_hdr;
2332*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2333*4882a593Smuzhiyun
2334*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
2335*4882a593Smuzhiyun crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
2336*4882a593Smuzhiyun r_addr = crb_hdr->addr;
2337*4882a593Smuzhiyun r_stride = crb_hdr->crb_strd.addr_stride;
2338*4882a593Smuzhiyun loop_cnt = crb_hdr->op_count;
2339*4882a593Smuzhiyun
2340*4882a593Smuzhiyun for (i = 0; i < loop_cnt; i++) {
2341*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2342*4882a593Smuzhiyun *data_ptr++ = r_addr;
2343*4882a593Smuzhiyun *data_ptr++ = r_value;
2344*4882a593Smuzhiyun r_addr += r_stride;
2345*4882a593Smuzhiyun }
2346*4882a593Smuzhiyun *d_ptr = data_ptr;
2347*4882a593Smuzhiyun }
2348*4882a593Smuzhiyun
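/*
 * RDMEM entry: capture adapter memory through the MIU test-agent registers,
 * 16 bytes per transaction. The read address must be 16-byte aligned and the
 * size a multiple of 16; each transaction is started via MD_MIU_TEST_AGT_CTRL
 * and polled until the busy bit clears before the four data words are read.
 */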
2349*4882a593Smuzhiyun static int
2350*4882a593Smuzhiyun qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
2351*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2352*4882a593Smuzhiyun {
2353*4882a593Smuzhiyun uint32_t r_addr, r_value, r_data;
2354*4882a593Smuzhiyun uint32_t i, j, loop_cnt;
2355*4882a593Smuzhiyun struct qla8044_minidump_entry_rdmem *m_hdr;
2356*4882a593Smuzhiyun unsigned long flags;
2357*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2358*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
2359*4882a593Smuzhiyun
2360*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
2361*4882a593Smuzhiyun m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
2362*4882a593Smuzhiyun r_addr = m_hdr->read_addr;
2363*4882a593Smuzhiyun loop_cnt = m_hdr->read_data_size/16;
2364*4882a593Smuzhiyun
2365*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
2366*4882a593Smuzhiyun "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
2367*4882a593Smuzhiyun __func__, r_addr, m_hdr->read_data_size);
2368*4882a593Smuzhiyun
2369*4882a593Smuzhiyun if (r_addr & 0xf) {
2370*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
2371*4882a593Smuzhiyun "[%s]: Read addr 0x%x not 16 bytes aligned\n",
2372*4882a593Smuzhiyun __func__, r_addr);
2373*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
2374*4882a593Smuzhiyun }
2375*4882a593Smuzhiyun
2376*4882a593Smuzhiyun if (m_hdr->read_data_size % 16) {
2377*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
2378*4882a593Smuzhiyun "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
2379*4882a593Smuzhiyun __func__, m_hdr->read_data_size);
2380*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
2381*4882a593Smuzhiyun }
2382*4882a593Smuzhiyun
2383*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
2384*4882a593Smuzhiyun "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
2385*4882a593Smuzhiyun __func__, r_addr, m_hdr->read_data_size, loop_cnt);
2386*4882a593Smuzhiyun
2387*4882a593Smuzhiyun write_lock_irqsave(&ha->hw_lock, flags);
2388*4882a593Smuzhiyun for (i = 0; i < loop_cnt; i++) {
2389*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
2390*4882a593Smuzhiyun r_value = 0;
2391*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
2392*4882a593Smuzhiyun r_value = MIU_TA_CTL_ENABLE;
2393*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
2394*4882a593Smuzhiyun r_value = MIU_TA_CTL_START_ENABLE;
2395*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
2396*4882a593Smuzhiyun
2397*4882a593Smuzhiyun for (j = 0; j < MAX_CTL_CHECK; j++) {
2398*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
2399*4882a593Smuzhiyun &r_value);
2400*4882a593Smuzhiyun if ((r_value & MIU_TA_CTL_BUSY) == 0)
2401*4882a593Smuzhiyun break;
2402*4882a593Smuzhiyun }
2403*4882a593Smuzhiyun
2404*4882a593Smuzhiyun if (j >= MAX_CTL_CHECK) {
2405*4882a593Smuzhiyun write_unlock_irqrestore(&ha->hw_lock, flags);
2406*4882a593Smuzhiyun return QLA_SUCCESS;
2407*4882a593Smuzhiyun }
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun for (j = 0; j < 4; j++) {
2410*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
2411*4882a593Smuzhiyun &r_data);
2412*4882a593Smuzhiyun *data_ptr++ = r_data;
2413*4882a593Smuzhiyun }
2414*4882a593Smuzhiyun
2415*4882a593Smuzhiyun r_addr += 16;
2416*4882a593Smuzhiyun }
2417*4882a593Smuzhiyun write_unlock_irqrestore(&ha->hw_lock, flags);
2418*4882a593Smuzhiyun
2419*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
2420*4882a593Smuzhiyun "Leaving fn: %s datacount: 0x%x\n",
2421*4882a593Smuzhiyun __func__, (loop_cnt * 16));
2422*4882a593Smuzhiyun
2423*4882a593Smuzhiyun *d_ptr = data_ptr;
2424*4882a593Smuzhiyun return QLA_SUCCESS;
2425*4882a593Smuzhiyun }
2426*4882a593Smuzhiyun
2427*4882a593Smuzhiyun /* ISP83xx flash read for _RDROM _BOARD */
2428*4882a593Smuzhiyun static uint32_t
2429*4882a593Smuzhiyun qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
2430*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2431*4882a593Smuzhiyun {
2432*4882a593Smuzhiyun uint32_t fl_addr, u32_count, rval;
2433*4882a593Smuzhiyun struct qla8044_minidump_entry_rdrom *rom_hdr;
2434*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2435*4882a593Smuzhiyun
2436*4882a593Smuzhiyun rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
2437*4882a593Smuzhiyun fl_addr = rom_hdr->read_addr;
2438*4882a593Smuzhiyun u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
2439*4882a593Smuzhiyun
2440*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
2441*4882a593Smuzhiyun __func__, fl_addr, u32_count);
2442*4882a593Smuzhiyun
2443*4882a593Smuzhiyun rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
2444*4882a593Smuzhiyun (u8 *)(data_ptr), u32_count);
2445*4882a593Smuzhiyun
2446*4882a593Smuzhiyun if (rval != QLA_SUCCESS) {
2447*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0f6,
2448*4882a593Smuzhiyun "%s: Flash Read Error,Count=%d\n", __func__, u32_count);
2449*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
2450*4882a593Smuzhiyun } else {
2451*4882a593Smuzhiyun data_ptr += u32_count;
2452*4882a593Smuzhiyun *d_ptr = data_ptr;
2453*4882a593Smuzhiyun return QLA_SUCCESS;
2454*4882a593Smuzhiyun }
2455*4882a593Smuzhiyun }
2456*4882a593Smuzhiyun
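/*
 * Mark a template entry as skipped (QLA82XX_DBG_SKIPPED_FLAG) and log its
 * index, type and capture level.
 */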
2457*4882a593Smuzhiyun static void
2458*4882a593Smuzhiyun qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
2459*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, int index)
2460*4882a593Smuzhiyun {
2461*4882a593Smuzhiyun entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
2462*4882a593Smuzhiyun
2463*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb0f7,
2464*4882a593Smuzhiyun "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2465*4882a593Smuzhiyun vha->host_no, index, entry_hdr->entry_type,
2466*4882a593Smuzhiyun entry_hdr->d_ctrl.entry_capture_mask);
2467*4882a593Smuzhiyun }
2468*4882a593Smuzhiyun
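/*
 * L2 cache tag/data entry: for each tag value, program the tag register,
 * optionally write the control register and poll it until the poll_mask bits
 * clear, then dump 'read_addr_cnt' words starting at read_addr.
 */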
2469*4882a593Smuzhiyun static int
2470*4882a593Smuzhiyun qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
2471*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr,
2472*4882a593Smuzhiyun uint32_t **d_ptr)
2473*4882a593Smuzhiyun {
2474*4882a593Smuzhiyun uint32_t addr, r_addr, c_addr, t_r_addr;
2475*4882a593Smuzhiyun uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2476*4882a593Smuzhiyun unsigned long p_wait, w_time, p_mask;
2477*4882a593Smuzhiyun uint32_t c_value_w, c_value_r;
2478*4882a593Smuzhiyun struct qla8044_minidump_entry_cache *cache_hdr;
2479*4882a593Smuzhiyun int rval = QLA_FUNCTION_FAILED;
2480*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2481*4882a593Smuzhiyun
2482*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
2483*4882a593Smuzhiyun cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
2484*4882a593Smuzhiyun
2485*4882a593Smuzhiyun loop_count = cache_hdr->op_count;
2486*4882a593Smuzhiyun r_addr = cache_hdr->read_addr;
2487*4882a593Smuzhiyun c_addr = cache_hdr->control_addr;
2488*4882a593Smuzhiyun c_value_w = cache_hdr->cache_ctrl.write_value;
2489*4882a593Smuzhiyun
2490*4882a593Smuzhiyun t_r_addr = cache_hdr->tag_reg_addr;
2491*4882a593Smuzhiyun t_value = cache_hdr->addr_ctrl.init_tag_value;
2492*4882a593Smuzhiyun r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2493*4882a593Smuzhiyun p_wait = cache_hdr->cache_ctrl.poll_wait;
2494*4882a593Smuzhiyun p_mask = cache_hdr->cache_ctrl.poll_mask;
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun for (i = 0; i < loop_count; i++) {
2497*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
2498*4882a593Smuzhiyun if (c_value_w)
2499*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
2500*4882a593Smuzhiyun
2501*4882a593Smuzhiyun if (p_mask) {
2502*4882a593Smuzhiyun w_time = jiffies + p_wait;
2503*4882a593Smuzhiyun do {
2504*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, c_addr,
2505*4882a593Smuzhiyun &c_value_r);
2506*4882a593Smuzhiyun if ((c_value_r & p_mask) == 0) {
2507*4882a593Smuzhiyun break;
2508*4882a593Smuzhiyun } else if (time_after_eq(jiffies, w_time)) {
2509*4882a593Smuzhiyun /* capturing dump failed */
2510*4882a593Smuzhiyun return rval;
2511*4882a593Smuzhiyun }
2512*4882a593Smuzhiyun } while (1);
2513*4882a593Smuzhiyun }
2514*4882a593Smuzhiyun
2515*4882a593Smuzhiyun addr = r_addr;
2516*4882a593Smuzhiyun for (k = 0; k < r_cnt; k++) {
2517*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr, &r_value);
2518*4882a593Smuzhiyun *data_ptr++ = r_value;
2519*4882a593Smuzhiyun addr += cache_hdr->read_ctrl.read_addr_stride;
2520*4882a593Smuzhiyun }
2521*4882a593Smuzhiyun t_value += cache_hdr->addr_ctrl.tag_value_stride;
2522*4882a593Smuzhiyun }
2523*4882a593Smuzhiyun *d_ptr = data_ptr;
2524*4882a593Smuzhiyun return QLA_SUCCESS;
2525*4882a593Smuzhiyun }
2526*4882a593Smuzhiyun
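/*
 * L1 cache entry: same tag/select/read sequence as the L2 handler, but with
 * an unconditional control write and no completion poll.
 */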
2527*4882a593Smuzhiyun static void
2528*4882a593Smuzhiyun qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
2529*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2530*4882a593Smuzhiyun {
2531*4882a593Smuzhiyun uint32_t addr, r_addr, c_addr, t_r_addr;
2532*4882a593Smuzhiyun uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2533*4882a593Smuzhiyun uint32_t c_value_w;
2534*4882a593Smuzhiyun struct qla8044_minidump_entry_cache *cache_hdr;
2535*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2536*4882a593Smuzhiyun
2537*4882a593Smuzhiyun cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
2538*4882a593Smuzhiyun loop_count = cache_hdr->op_count;
2539*4882a593Smuzhiyun r_addr = cache_hdr->read_addr;
2540*4882a593Smuzhiyun c_addr = cache_hdr->control_addr;
2541*4882a593Smuzhiyun c_value_w = cache_hdr->cache_ctrl.write_value;
2542*4882a593Smuzhiyun
2543*4882a593Smuzhiyun t_r_addr = cache_hdr->tag_reg_addr;
2544*4882a593Smuzhiyun t_value = cache_hdr->addr_ctrl.init_tag_value;
2545*4882a593Smuzhiyun r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2546*4882a593Smuzhiyun
2547*4882a593Smuzhiyun for (i = 0; i < loop_count; i++) {
2548*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
2549*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
2550*4882a593Smuzhiyun addr = r_addr;
2551*4882a593Smuzhiyun for (k = 0; k < r_cnt; k++) {
2552*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr, &r_value);
2553*4882a593Smuzhiyun *data_ptr++ = r_value;
2554*4882a593Smuzhiyun addr += cache_hdr->read_ctrl.read_addr_stride;
2555*4882a593Smuzhiyun }
2556*4882a593Smuzhiyun t_value += cache_hdr->addr_ctrl.tag_value_stride;
2557*4882a593Smuzhiyun }
2558*4882a593Smuzhiyun *d_ptr = data_ptr;
2559*4882a593Smuzhiyun }
2560*4882a593Smuzhiyun
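/*
 * RDOCM entry: on-chip memory is visible through the PCI BAR, so the words
 * are read directly with readl() at (nx_pcibase + r_addr) rather than through
 * the indirect CRB window.
 */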
2561*4882a593Smuzhiyun static void
2562*4882a593Smuzhiyun qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
2563*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2564*4882a593Smuzhiyun {
2565*4882a593Smuzhiyun uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2566*4882a593Smuzhiyun struct qla8044_minidump_entry_rdocm *ocm_hdr;
2567*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2568*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
2569*4882a593Smuzhiyun
2570*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
2571*4882a593Smuzhiyun
2572*4882a593Smuzhiyun ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
2573*4882a593Smuzhiyun r_addr = ocm_hdr->read_addr;
2574*4882a593Smuzhiyun r_stride = ocm_hdr->read_addr_stride;
2575*4882a593Smuzhiyun loop_cnt = ocm_hdr->op_count;
2576*4882a593Smuzhiyun
2577*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
2578*4882a593Smuzhiyun "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
2579*4882a593Smuzhiyun __func__, r_addr, r_stride, loop_cnt);
2580*4882a593Smuzhiyun
2581*4882a593Smuzhiyun for (i = 0; i < loop_cnt; i++) {
2582*4882a593Smuzhiyun r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
2583*4882a593Smuzhiyun *data_ptr++ = r_value;
2584*4882a593Smuzhiyun r_addr += r_stride;
2585*4882a593Smuzhiyun }
2586*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
2587*4882a593Smuzhiyun __func__, (unsigned long)(loop_cnt * sizeof(uint32_t)));
2588*4882a593Smuzhiyun
2589*4882a593Smuzhiyun *d_ptr = data_ptr;
2590*4882a593Smuzhiyun }
2591*4882a593Smuzhiyun
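/*
 * RDMUX entry: write each select value to select_addr, read read_addr, and
 * store the (select_value, data) pair; the select value advances by
 * select_value_stride every iteration.
 */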
2592*4882a593Smuzhiyun static void
2593*4882a593Smuzhiyun qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
2594*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr,
2595*4882a593Smuzhiyun uint32_t **d_ptr)
2596*4882a593Smuzhiyun {
2597*4882a593Smuzhiyun uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
2598*4882a593Smuzhiyun struct qla8044_minidump_entry_mux *mux_hdr;
2599*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2600*4882a593Smuzhiyun
2601*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
2602*4882a593Smuzhiyun
2603*4882a593Smuzhiyun mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
2604*4882a593Smuzhiyun r_addr = mux_hdr->read_addr;
2605*4882a593Smuzhiyun s_addr = mux_hdr->select_addr;
2606*4882a593Smuzhiyun s_stride = mux_hdr->select_value_stride;
2607*4882a593Smuzhiyun s_value = mux_hdr->select_value;
2608*4882a593Smuzhiyun loop_cnt = mux_hdr->op_count;
2609*4882a593Smuzhiyun
2610*4882a593Smuzhiyun for (i = 0; i < loop_cnt; i++) {
2611*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, s_addr, s_value);
2612*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2613*4882a593Smuzhiyun *data_ptr++ = s_value;
2614*4882a593Smuzhiyun *data_ptr++ = r_value;
2615*4882a593Smuzhiyun s_value += s_stride;
2616*4882a593Smuzhiyun }
2617*4882a593Smuzhiyun *d_ptr = data_ptr;
2618*4882a593Smuzhiyun }
2619*4882a593Smuzhiyun
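/*
 * QUEUE entry: step through queue IDs (queue_id_stride apart), select each
 * queue via select_addr and dump 'read_addr_cnt' registers starting at
 * read_addr for that queue.
 */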
2620*4882a593Smuzhiyun static void
2621*4882a593Smuzhiyun qla8044_minidump_process_queue(struct scsi_qla_host *vha,
2622*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr,
2623*4882a593Smuzhiyun uint32_t **d_ptr)
2624*4882a593Smuzhiyun {
2625*4882a593Smuzhiyun uint32_t s_addr, r_addr;
2626*4882a593Smuzhiyun uint32_t r_stride, r_value, r_cnt, qid = 0;
2627*4882a593Smuzhiyun uint32_t i, k, loop_cnt;
2628*4882a593Smuzhiyun struct qla8044_minidump_entry_queue *q_hdr;
2629*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2630*4882a593Smuzhiyun
2631*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
2632*4882a593Smuzhiyun q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
2633*4882a593Smuzhiyun s_addr = q_hdr->select_addr;
2634*4882a593Smuzhiyun r_cnt = q_hdr->rd_strd.read_addr_cnt;
2635*4882a593Smuzhiyun r_stride = q_hdr->rd_strd.read_addr_stride;
2636*4882a593Smuzhiyun loop_cnt = q_hdr->op_count;
2637*4882a593Smuzhiyun
2638*4882a593Smuzhiyun for (i = 0; i < loop_cnt; i++) {
2639*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, s_addr, qid);
2640*4882a593Smuzhiyun r_addr = q_hdr->read_addr;
2641*4882a593Smuzhiyun for (k = 0; k < r_cnt; k++) {
2642*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2643*4882a593Smuzhiyun *data_ptr++ = r_value;
2644*4882a593Smuzhiyun r_addr += r_stride;
2645*4882a593Smuzhiyun }
2646*4882a593Smuzhiyun qid += q_hdr->q_strd.queue_id_stride;
2647*4882a593Smuzhiyun }
2648*4882a593Smuzhiyun *d_ptr = data_ptr;
2649*4882a593Smuzhiyun }
2650*4882a593Smuzhiyun
2651*4882a593Smuzhiyun /* ISP83xx functions to process new minidump entries... */
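/*
 * POLLRD entry: like RDMUX, but after each select write the select register
 * is polled in ~1 ms steps (up to 'poll_wait' tries) for the poll_mask bits
 * before the data register is sampled.
 */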
2652*4882a593Smuzhiyun static uint32_t
2653*4882a593Smuzhiyun qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
2654*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr,
2655*4882a593Smuzhiyun uint32_t **d_ptr)
2656*4882a593Smuzhiyun {
2657*4882a593Smuzhiyun uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
2658*4882a593Smuzhiyun uint16_t s_stride, i;
2659*4882a593Smuzhiyun struct qla8044_minidump_entry_pollrd *pollrd_hdr;
2660*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2661*4882a593Smuzhiyun
2662*4882a593Smuzhiyun pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
2663*4882a593Smuzhiyun s_addr = pollrd_hdr->select_addr;
2664*4882a593Smuzhiyun r_addr = pollrd_hdr->read_addr;
2665*4882a593Smuzhiyun s_value = pollrd_hdr->select_value;
2666*4882a593Smuzhiyun s_stride = pollrd_hdr->select_value_stride;
2667*4882a593Smuzhiyun
2668*4882a593Smuzhiyun poll_wait = pollrd_hdr->poll_wait;
2669*4882a593Smuzhiyun poll_mask = pollrd_hdr->poll_mask;
2670*4882a593Smuzhiyun
2671*4882a593Smuzhiyun for (i = 0; i < pollrd_hdr->op_count; i++) {
2672*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, s_addr, s_value);
2673*4882a593Smuzhiyun poll_wait = pollrd_hdr->poll_wait;
2674*4882a593Smuzhiyun while (1) {
2675*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, s_addr, &r_value);
2676*4882a593Smuzhiyun if ((r_value & poll_mask) != 0) {
2677*4882a593Smuzhiyun break;
2678*4882a593Smuzhiyun } else {
2679*4882a593Smuzhiyun usleep_range(1000, 1100);
2680*4882a593Smuzhiyun if (--poll_wait == 0) {
2681*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0fe,
2682*4882a593Smuzhiyun "%s: TIMEOUT\n", __func__);
2683*4882a593Smuzhiyun goto error;
2684*4882a593Smuzhiyun }
2685*4882a593Smuzhiyun }
2686*4882a593Smuzhiyun }
2687*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2688*4882a593Smuzhiyun *data_ptr++ = s_value;
2689*4882a593Smuzhiyun *data_ptr++ = r_value;
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun s_value += s_stride;
2692*4882a593Smuzhiyun }
2693*4882a593Smuzhiyun *d_ptr = data_ptr;
2694*4882a593Smuzhiyun return QLA_SUCCESS;
2695*4882a593Smuzhiyun
2696*4882a593Smuzhiyun error:
2697*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
2698*4882a593Smuzhiyun }
2699*4882a593Smuzhiyun
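/*
 * RDMUX2 entry: a two-stage mux read. For each iteration, select_addr_1 is
 * programmed with two select values in turn; the masked select value is
 * echoed to select_addr_2 and stored along with the word read from
 * read_addr, so four words are emitted per iteration.
 */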
2700*4882a593Smuzhiyun static void
2701*4882a593Smuzhiyun qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
2702*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2703*4882a593Smuzhiyun {
2704*4882a593Smuzhiyun uint32_t sel_val1, sel_val2, t_sel_val, data, i;
2705*4882a593Smuzhiyun uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
2706*4882a593Smuzhiyun struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
2707*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2708*4882a593Smuzhiyun
2709*4882a593Smuzhiyun rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
2710*4882a593Smuzhiyun sel_val1 = rdmux2_hdr->select_value_1;
2711*4882a593Smuzhiyun sel_val2 = rdmux2_hdr->select_value_2;
2712*4882a593Smuzhiyun sel_addr1 = rdmux2_hdr->select_addr_1;
2713*4882a593Smuzhiyun sel_addr2 = rdmux2_hdr->select_addr_2;
2714*4882a593Smuzhiyun sel_val_mask = rdmux2_hdr->select_value_mask;
2715*4882a593Smuzhiyun read_addr = rdmux2_hdr->read_addr;
2716*4882a593Smuzhiyun
2717*4882a593Smuzhiyun for (i = 0; i < rdmux2_hdr->op_count; i++) {
2718*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
2719*4882a593Smuzhiyun t_sel_val = sel_val1 & sel_val_mask;
2720*4882a593Smuzhiyun *data_ptr++ = t_sel_val;
2721*4882a593Smuzhiyun
2722*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2723*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, read_addr, &data);
2724*4882a593Smuzhiyun
2725*4882a593Smuzhiyun *data_ptr++ = data;
2726*4882a593Smuzhiyun
2727*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
2728*4882a593Smuzhiyun t_sel_val = sel_val2 & sel_val_mask;
2729*4882a593Smuzhiyun *data_ptr++ = t_sel_val;
2730*4882a593Smuzhiyun
2731*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2732*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, read_addr, &data);
2733*4882a593Smuzhiyun
2734*4882a593Smuzhiyun *data_ptr++ = data;
2735*4882a593Smuzhiyun
2736*4882a593Smuzhiyun sel_val1 += rdmux2_hdr->select_value_stride;
2737*4882a593Smuzhiyun sel_val2 += rdmux2_hdr->select_value_stride;
2738*4882a593Smuzhiyun }
2739*4882a593Smuzhiyun
2740*4882a593Smuzhiyun *d_ptr = data_ptr;
2741*4882a593Smuzhiyun }
2742*4882a593Smuzhiyun
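/*
 * POLLRDMWR entry: write value_1 to addr_1 and poll it for the poll_mask
 * bits, read-modify-write addr_2 using modify_mask, write value_2 to addr_1
 * and poll again. Only the final (addr_2, data) pair is stored in the dump.
 */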
2743*4882a593Smuzhiyun static uint32_t
2744*4882a593Smuzhiyun qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
2745*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr,
2746*4882a593Smuzhiyun uint32_t **d_ptr)
2747*4882a593Smuzhiyun {
2748*4882a593Smuzhiyun uint32_t poll_wait, poll_mask, r_value, data;
2749*4882a593Smuzhiyun uint32_t addr_1, addr_2, value_1, value_2;
2750*4882a593Smuzhiyun struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
2751*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
2752*4882a593Smuzhiyun
2753*4882a593Smuzhiyun poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
2754*4882a593Smuzhiyun addr_1 = poll_hdr->addr_1;
2755*4882a593Smuzhiyun addr_2 = poll_hdr->addr_2;
2756*4882a593Smuzhiyun value_1 = poll_hdr->value_1;
2757*4882a593Smuzhiyun value_2 = poll_hdr->value_2;
2758*4882a593Smuzhiyun poll_mask = poll_hdr->poll_mask;
2759*4882a593Smuzhiyun
2760*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr_1, value_1);
2761*4882a593Smuzhiyun
2762*4882a593Smuzhiyun poll_wait = poll_hdr->poll_wait;
2763*4882a593Smuzhiyun while (1) {
2764*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2765*4882a593Smuzhiyun
2766*4882a593Smuzhiyun if ((r_value & poll_mask) != 0) {
2767*4882a593Smuzhiyun break;
2768*4882a593Smuzhiyun } else {
2769*4882a593Smuzhiyun usleep_range(1000, 1100);
2770*4882a593Smuzhiyun if (--poll_wait == 0) {
2771*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0ff,
2772*4882a593Smuzhiyun "%s: TIMEOUT\n", __func__);
2773*4882a593Smuzhiyun goto error;
2774*4882a593Smuzhiyun }
2775*4882a593Smuzhiyun }
2776*4882a593Smuzhiyun }
2777*4882a593Smuzhiyun
2778*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr_2, &data);
2779*4882a593Smuzhiyun data &= poll_hdr->modify_mask;
2780*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr_2, data);
2781*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr_1, value_2);
2782*4882a593Smuzhiyun
2783*4882a593Smuzhiyun poll_wait = poll_hdr->poll_wait;
2784*4882a593Smuzhiyun while (1) {
2785*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2786*4882a593Smuzhiyun
2787*4882a593Smuzhiyun if ((r_value & poll_mask) != 0) {
2788*4882a593Smuzhiyun break;
2789*4882a593Smuzhiyun } else {
2790*4882a593Smuzhiyun usleep_range(1000, 1100);
2791*4882a593Smuzhiyun if (--poll_wait == 0) {
2792*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb100,
2793*4882a593Smuzhiyun "%s: TIMEOUT2\n", __func__);
2794*4882a593Smuzhiyun goto error;
2795*4882a593Smuzhiyun }
2796*4882a593Smuzhiyun }
2797*4882a593Smuzhiyun }
2798*4882a593Smuzhiyun
2799*4882a593Smuzhiyun *data_ptr++ = addr_2;
2800*4882a593Smuzhiyun *data_ptr++ = data;
2801*4882a593Smuzhiyun
2802*4882a593Smuzhiyun *d_ptr = data_ptr;
2803*4882a593Smuzhiyun
2804*4882a593Smuzhiyun return QLA_SUCCESS;
2805*4882a593Smuzhiyun
2806*4882a593Smuzhiyun error:
2807*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
2808*4882a593Smuzhiyun }
2809*4882a593Smuzhiyun
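/*
 * PEX DMA support: large RDMEM entries can be captured with the on-chip PCIe
 * DMA engine instead of word-by-word test-agent reads. The register block of
 * the selected engine sits at ISP8044_PEX_DMA_BASE_ADDRESS plus
 * engine-number * ISP8044_PEX_DMA_NUM_OFFSET.
 */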
2810*4882a593Smuzhiyun #define ISP8044_PEX_DMA_ENGINE_INDEX 8
2811*4882a593Smuzhiyun #define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000
2812*4882a593Smuzhiyun #define ISP8044_PEX_DMA_NUM_OFFSET 0x10000UL
2813*4882a593Smuzhiyun #define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0
2814*4882a593Smuzhiyun #define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04
2815*4882a593Smuzhiyun #define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08
2816*4882a593Smuzhiyun
2817*4882a593Smuzhiyun #define ISP8044_PEX_DMA_READ_SIZE (16 * 1024)
2818*4882a593Smuzhiyun #define ISP8044_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */
2819*4882a593Smuzhiyun
2820*4882a593Smuzhiyun static int
2821*4882a593Smuzhiyun qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
2822*4882a593Smuzhiyun {
2823*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
2824*4882a593Smuzhiyun int rval = QLA_SUCCESS;
2825*4882a593Smuzhiyun uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2826*4882a593Smuzhiyun uint64_t dma_base_addr = 0;
2827*4882a593Smuzhiyun struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2828*4882a593Smuzhiyun
2829*4882a593Smuzhiyun tmplt_hdr = ha->md_tmplt_hdr;
2830*4882a593Smuzhiyun dma_eng_num =
2831*4882a593Smuzhiyun tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2832*4882a593Smuzhiyun dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2833*4882a593Smuzhiyun (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2834*4882a593Smuzhiyun
2835*4882a593Smuzhiyun /* Read the pex-dma's command-status-and-control register. */
2836*4882a593Smuzhiyun rval = qla8044_rd_reg_indirect(vha,
2837*4882a593Smuzhiyun (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2838*4882a593Smuzhiyun &cmd_sts_and_cntrl);
2839*4882a593Smuzhiyun if (rval)
2840*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
2841*4882a593Smuzhiyun
2842*4882a593Smuzhiyun /* Check if requested pex-dma engine is available. */
2843*4882a593Smuzhiyun if (cmd_sts_and_cntrl & BIT_31)
2844*4882a593Smuzhiyun return QLA_SUCCESS;
2845*4882a593Smuzhiyun
2846*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
2847*4882a593Smuzhiyun }
2848*4882a593Smuzhiyun
2849*4882a593Smuzhiyun static int
2850*4882a593Smuzhiyun qla8044_start_pex_dma(struct scsi_qla_host *vha,
2851*4882a593Smuzhiyun struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
2852*4882a593Smuzhiyun {
2853*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
2854*4882a593Smuzhiyun int rval = QLA_SUCCESS, wait = 0;
2855*4882a593Smuzhiyun uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2856*4882a593Smuzhiyun uint64_t dma_base_addr = 0;
2857*4882a593Smuzhiyun struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2858*4882a593Smuzhiyun
2859*4882a593Smuzhiyun tmplt_hdr = ha->md_tmplt_hdr;
2860*4882a593Smuzhiyun dma_eng_num =
2861*4882a593Smuzhiyun tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2862*4882a593Smuzhiyun dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2863*4882a593Smuzhiyun (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2864*4882a593Smuzhiyun
2865*4882a593Smuzhiyun rval = qla8044_wr_reg_indirect(vha,
2866*4882a593Smuzhiyun dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
2867*4882a593Smuzhiyun m_hdr->desc_card_addr);
2868*4882a593Smuzhiyun if (rval)
2869*4882a593Smuzhiyun goto error_exit;
2870*4882a593Smuzhiyun
2871*4882a593Smuzhiyun rval = qla8044_wr_reg_indirect(vha,
2872*4882a593Smuzhiyun dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
2873*4882a593Smuzhiyun if (rval)
2874*4882a593Smuzhiyun goto error_exit;
2875*4882a593Smuzhiyun
2876*4882a593Smuzhiyun rval = qla8044_wr_reg_indirect(vha,
2877*4882a593Smuzhiyun dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
2878*4882a593Smuzhiyun m_hdr->start_dma_cmd);
2879*4882a593Smuzhiyun if (rval)
2880*4882a593Smuzhiyun goto error_exit;
2881*4882a593Smuzhiyun
2882*4882a593Smuzhiyun /* Wait for dma operation to complete. */
2883*4882a593Smuzhiyun for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
2884*4882a593Smuzhiyun rval = qla8044_rd_reg_indirect(vha,
2885*4882a593Smuzhiyun (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2886*4882a593Smuzhiyun &cmd_sts_and_cntrl);
2887*4882a593Smuzhiyun if (rval)
2888*4882a593Smuzhiyun goto error_exit;
2889*4882a593Smuzhiyun
2890*4882a593Smuzhiyun if ((cmd_sts_and_cntrl & BIT_1) == 0)
2891*4882a593Smuzhiyun break;
2892*4882a593Smuzhiyun
2893*4882a593Smuzhiyun udelay(10);
2894*4882a593Smuzhiyun }
2895*4882a593Smuzhiyun
2896*4882a593Smuzhiyun /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
2897*4882a593Smuzhiyun if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
2898*4882a593Smuzhiyun rval = QLA_FUNCTION_FAILED;
2899*4882a593Smuzhiyun goto error_exit;
2900*4882a593Smuzhiyun }
2901*4882a593Smuzhiyun
2902*4882a593Smuzhiyun error_exit:
2903*4882a593Smuzhiyun return rval;
2904*4882a593Smuzhiyun }
2905*4882a593Smuzhiyun
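/*
 * Fast RDMEM path: check that the DMA engine is free, allocate a coherent
 * ISP8044_PEX_DMA_READ_SIZE buffer, then for each chunk write a descriptor
 * into MS memory, start the engine and copy the completed chunk into the
 * dump buffer. Any failure lets the caller fall back to the test-agent read.
 */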
2906*4882a593Smuzhiyun static int
2907*4882a593Smuzhiyun qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
2908*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2909*4882a593Smuzhiyun {
2910*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
2911*4882a593Smuzhiyun int rval = QLA_SUCCESS;
2912*4882a593Smuzhiyun struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
2913*4882a593Smuzhiyun uint32_t chunk_size, read_size;
2914*4882a593Smuzhiyun uint8_t *data_ptr = (uint8_t *)*d_ptr;
2915*4882a593Smuzhiyun void *rdmem_buffer = NULL;
2916*4882a593Smuzhiyun dma_addr_t rdmem_dma;
2917*4882a593Smuzhiyun struct qla8044_pex_dma_descriptor dma_desc;
2918*4882a593Smuzhiyun
2919*4882a593Smuzhiyun rval = qla8044_check_dma_engine_state(vha);
2920*4882a593Smuzhiyun if (rval != QLA_SUCCESS) {
2921*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb147,
2922*4882a593Smuzhiyun "DMA engine not available. Fallback to rdmem-read.\n");
2923*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
2924*4882a593Smuzhiyun }
2925*4882a593Smuzhiyun
2926*4882a593Smuzhiyun m_hdr = (void *)entry_hdr;
2927*4882a593Smuzhiyun
2928*4882a593Smuzhiyun rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
2929*4882a593Smuzhiyun ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
2930*4882a593Smuzhiyun if (!rdmem_buffer) {
2931*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb148,
2932*4882a593Smuzhiyun "Unable to allocate rdmem dma buffer\n");
2933*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
2934*4882a593Smuzhiyun }
2935*4882a593Smuzhiyun
2936*4882a593Smuzhiyun /* Prepare pex-dma descriptor to be written to MS memory. */
2937*4882a593Smuzhiyun /* dma-desc-cmd layout:
2938*4882a593Smuzhiyun * bits 0-3: dma-desc-cmd bits 0-3
2939*4882a593Smuzhiyun * bits 4-7: PCI function number
2940*4882a593Smuzhiyun * bits 8-15: dma-desc-cmd bits 8-15
2941*4882a593Smuzhiyun * dma_bus_addr: DMA buffer address
2942*4882a593Smuzhiyun * cmd.read_data_size: size of the data chunk to be read.
2943*4882a593Smuzhiyun */
2944*4882a593Smuzhiyun dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
2945*4882a593Smuzhiyun dma_desc.cmd.dma_desc_cmd |=
2946*4882a593Smuzhiyun ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
2947*4882a593Smuzhiyun
2948*4882a593Smuzhiyun dma_desc.dma_bus_addr = rdmem_dma;
2949*4882a593Smuzhiyun dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
2950*4882a593Smuzhiyun read_size = 0;
2951*4882a593Smuzhiyun
2952*4882a593Smuzhiyun /*
2953*4882a593Smuzhiyun * Perform rdmem operation using pex-dma.
2954*4882a593Smuzhiyun * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE.
2955*4882a593Smuzhiyun */
2956*4882a593Smuzhiyun while (read_size < m_hdr->read_data_size) {
2957*4882a593Smuzhiyun if (m_hdr->read_data_size - read_size <
2958*4882a593Smuzhiyun ISP8044_PEX_DMA_READ_SIZE) {
2959*4882a593Smuzhiyun chunk_size = (m_hdr->read_data_size - read_size);
2960*4882a593Smuzhiyun dma_desc.cmd.read_data_size = chunk_size;
2961*4882a593Smuzhiyun }
2962*4882a593Smuzhiyun
2963*4882a593Smuzhiyun dma_desc.src_addr = m_hdr->read_addr + read_size;
2964*4882a593Smuzhiyun
2965*4882a593Smuzhiyun /* Prepare: Write pex-dma descriptor to MS memory. */
2966*4882a593Smuzhiyun rval = qla8044_ms_mem_write_128b(vha,
2967*4882a593Smuzhiyun m_hdr->desc_card_addr, (uint32_t *)&dma_desc,
2968*4882a593Smuzhiyun (sizeof(struct qla8044_pex_dma_descriptor)/16));
2969*4882a593Smuzhiyun if (rval) {
2970*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb14a,
2971*4882a593Smuzhiyun "%s: Error writing rdmem-dma-init to MS !!!\n",
2972*4882a593Smuzhiyun __func__);
2973*4882a593Smuzhiyun goto error_exit;
2974*4882a593Smuzhiyun }
2975*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb14b,
2976*4882a593Smuzhiyun "%s: Dma-descriptor: Instruct for rdmem dma "
2977*4882a593Smuzhiyun "(chunk_size 0x%x).\n", __func__, chunk_size);
2978*4882a593Smuzhiyun
2979*4882a593Smuzhiyun /* Execute: Start pex-dma operation. */
2980*4882a593Smuzhiyun rval = qla8044_start_pex_dma(vha, m_hdr);
2981*4882a593Smuzhiyun if (rval)
2982*4882a593Smuzhiyun goto error_exit;
2983*4882a593Smuzhiyun
2984*4882a593Smuzhiyun memcpy(data_ptr, rdmem_buffer, chunk_size);
2985*4882a593Smuzhiyun data_ptr += chunk_size;
2986*4882a593Smuzhiyun read_size += chunk_size;
2987*4882a593Smuzhiyun }
2988*4882a593Smuzhiyun
2989*4882a593Smuzhiyun *d_ptr = (uint32_t *)data_ptr;
2990*4882a593Smuzhiyun
2991*4882a593Smuzhiyun error_exit:
2992*4882a593Smuzhiyun if (rdmem_buffer)
2993*4882a593Smuzhiyun dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
2994*4882a593Smuzhiyun rdmem_buffer, rdmem_dma);
2995*4882a593Smuzhiyun
2996*4882a593Smuzhiyun return rval;
2997*4882a593Smuzhiyun }
2998*4882a593Smuzhiyun
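/*
 * RDDFE entry: an indirect engine read. Each iteration arms the interface at
 * addr_1 (value | 0x40000000) and polls for the mask bits, writes the
 * masked/annotated word back through addr_2, re-arms and polls twice more,
 * and finally stores the (written value, read data) pair.
 */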
2999*4882a593Smuzhiyun static uint32_t
3000*4882a593Smuzhiyun qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
3001*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3002*4882a593Smuzhiyun {
3003*4882a593Smuzhiyun int loop_cnt;
3004*4882a593Smuzhiyun uint32_t addr1, addr2, value, data, temp, wrVal;
3005*4882a593Smuzhiyun uint8_t stride, stride2;
3006*4882a593Smuzhiyun uint16_t count;
3007*4882a593Smuzhiyun uint32_t poll, mask, modify_mask;
3008*4882a593Smuzhiyun uint32_t wait_count = 0;
3009*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
3010*4882a593Smuzhiyun struct qla8044_minidump_entry_rddfe *rddfe;
3011*4882a593Smuzhiyun
3012*4882a593Smuzhiyun rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
3013*4882a593Smuzhiyun
3014*4882a593Smuzhiyun addr1 = rddfe->addr_1;
3015*4882a593Smuzhiyun value = rddfe->value;
3016*4882a593Smuzhiyun stride = rddfe->stride;
3017*4882a593Smuzhiyun stride2 = rddfe->stride2;
3018*4882a593Smuzhiyun count = rddfe->count;
3019*4882a593Smuzhiyun
3020*4882a593Smuzhiyun poll = rddfe->poll;
3021*4882a593Smuzhiyun mask = rddfe->mask;
3022*4882a593Smuzhiyun modify_mask = rddfe->modify_mask;
3023*4882a593Smuzhiyun
3024*4882a593Smuzhiyun addr2 = addr1 + stride;
3025*4882a593Smuzhiyun
3026*4882a593Smuzhiyun for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
3027*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
3028*4882a593Smuzhiyun
3029*4882a593Smuzhiyun wait_count = 0;
3030*4882a593Smuzhiyun while (wait_count < poll) {
3031*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr1, &temp);
3032*4882a593Smuzhiyun if ((temp & mask) != 0)
3033*4882a593Smuzhiyun break;
3034*4882a593Smuzhiyun wait_count++;
3035*4882a593Smuzhiyun }
3036*4882a593Smuzhiyun
3037*4882a593Smuzhiyun if (wait_count == poll) {
3038*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb153,
3039*4882a593Smuzhiyun "%s: TIMEOUT\n", __func__);
3040*4882a593Smuzhiyun goto error;
3041*4882a593Smuzhiyun } else {
3042*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr2, &temp);
3043*4882a593Smuzhiyun temp = temp & modify_mask;
3044*4882a593Smuzhiyun temp = (temp | ((loop_cnt << 16) | loop_cnt));
3045*4882a593Smuzhiyun wrVal = ((temp << 16) | temp);
3046*4882a593Smuzhiyun
3047*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr2, wrVal);
3048*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr1, value);
3049*4882a593Smuzhiyun
3050*4882a593Smuzhiyun wait_count = 0;
3051*4882a593Smuzhiyun while (wait_count < poll) {
3052*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr1, &temp);
3053*4882a593Smuzhiyun if ((temp & mask) != 0)
3054*4882a593Smuzhiyun break;
3055*4882a593Smuzhiyun wait_count++;
3056*4882a593Smuzhiyun }
3057*4882a593Smuzhiyun if (wait_count == poll) {
3058*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb154,
3059*4882a593Smuzhiyun "%s: TIMEOUT\n", __func__);
3060*4882a593Smuzhiyun goto error;
3061*4882a593Smuzhiyun }
3062*4882a593Smuzhiyun
3063*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr1,
3064*4882a593Smuzhiyun ((0x40000000 | value) + stride2));
3065*4882a593Smuzhiyun wait_count = 0;
3066*4882a593Smuzhiyun while (wait_count < poll) {
3067*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr1, &temp);
3068*4882a593Smuzhiyun if ((temp & mask) != 0)
3069*4882a593Smuzhiyun break;
3070*4882a593Smuzhiyun wait_count++;
3071*4882a593Smuzhiyun }
3072*4882a593Smuzhiyun
3073*4882a593Smuzhiyun if (wait_count == poll) {
3074*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb155,
3075*4882a593Smuzhiyun "%s: TIMEOUT\n", __func__);
3076*4882a593Smuzhiyun goto error;
3077*4882a593Smuzhiyun }
3078*4882a593Smuzhiyun
3079*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr2, &data);
3080*4882a593Smuzhiyun
3081*4882a593Smuzhiyun *data_ptr++ = wrVal;
3082*4882a593Smuzhiyun *data_ptr++ = data;
3083*4882a593Smuzhiyun }
3084*4882a593Smuzhiyun
3085*4882a593Smuzhiyun }
3086*4882a593Smuzhiyun
3087*4882a593Smuzhiyun *d_ptr = data_ptr;
3088*4882a593Smuzhiyun return QLA_SUCCESS;
3089*4882a593Smuzhiyun
3090*4882a593Smuzhiyun error:
3091*4882a593Smuzhiyun return -1;
3092*4882a593Smuzhiyun
3093*4882a593Smuzhiyun }
3094*4882a593Smuzhiyun
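/*
 * RDMDIO entry: read a series of registers through the indirect MDIO access
 * helpers (bus-idle poll, address/command writes, then the data read). Each
 * iteration stores the computed select value and the word returned by
 * qla8044_ipmdio_rd_reg().
 */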
3095*4882a593Smuzhiyun static uint32_t
3096*4882a593Smuzhiyun qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
3097*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3098*4882a593Smuzhiyun {
3099*4882a593Smuzhiyun int ret = 0;
3100*4882a593Smuzhiyun uint32_t addr1, addr2, value1, value2, data, selVal;
3101*4882a593Smuzhiyun uint8_t stride1, stride2;
3102*4882a593Smuzhiyun uint32_t addr3, addr4, addr5, addr6, addr7;
3103*4882a593Smuzhiyun uint16_t count, loop_cnt;
3104*4882a593Smuzhiyun uint32_t mask;
3105*4882a593Smuzhiyun uint32_t *data_ptr = *d_ptr;
3106*4882a593Smuzhiyun
3107*4882a593Smuzhiyun struct qla8044_minidump_entry_rdmdio *rdmdio;
3108*4882a593Smuzhiyun
3109*4882a593Smuzhiyun rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
3110*4882a593Smuzhiyun
3111*4882a593Smuzhiyun addr1 = rdmdio->addr_1;
3112*4882a593Smuzhiyun addr2 = rdmdio->addr_2;
3113*4882a593Smuzhiyun value1 = rdmdio->value_1;
3114*4882a593Smuzhiyun stride1 = rdmdio->stride_1;
3115*4882a593Smuzhiyun stride2 = rdmdio->stride_2;
3116*4882a593Smuzhiyun count = rdmdio->count;
3117*4882a593Smuzhiyun
3118*4882a593Smuzhiyun mask = rdmdio->mask;
3119*4882a593Smuzhiyun value2 = rdmdio->value_2;
3120*4882a593Smuzhiyun
3121*4882a593Smuzhiyun addr3 = addr1 + stride1;
3122*4882a593Smuzhiyun
3123*4882a593Smuzhiyun for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
3124*4882a593Smuzhiyun ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3125*4882a593Smuzhiyun addr3, mask);
3126*4882a593Smuzhiyun if (ret == -1)
3127*4882a593Smuzhiyun goto error;
3128*4882a593Smuzhiyun
3129*4882a593Smuzhiyun addr4 = addr2 - stride1;
3130*4882a593Smuzhiyun ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
3131*4882a593Smuzhiyun value2);
3132*4882a593Smuzhiyun if (ret == -1)
3133*4882a593Smuzhiyun goto error;
3134*4882a593Smuzhiyun
3135*4882a593Smuzhiyun addr5 = addr2 - (2 * stride1);
3136*4882a593Smuzhiyun ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
3137*4882a593Smuzhiyun value1);
3138*4882a593Smuzhiyun if (ret == -1)
3139*4882a593Smuzhiyun goto error;
3140*4882a593Smuzhiyun
3141*4882a593Smuzhiyun addr6 = addr2 - (3 * stride1);
3142*4882a593Smuzhiyun ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
3143*4882a593Smuzhiyun addr6, 0x2);
3144*4882a593Smuzhiyun if (ret == -1)
3145*4882a593Smuzhiyun goto error;
3146*4882a593Smuzhiyun
3147*4882a593Smuzhiyun ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3148*4882a593Smuzhiyun addr3, mask);
3149*4882a593Smuzhiyun if (ret == -1)
3150*4882a593Smuzhiyun goto error;
3151*4882a593Smuzhiyun
3152*4882a593Smuzhiyun addr7 = addr2 - (4 * stride1);
3153*4882a593Smuzhiyun data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7);
3154*4882a593Smuzhiyun if (data == -1)
3155*4882a593Smuzhiyun goto error;
3156*4882a593Smuzhiyun
3157*4882a593Smuzhiyun selVal = (value2 << 18) | (value1 << 2) | 2;
3158*4882a593Smuzhiyun
3159*4882a593Smuzhiyun stride2 = rdmdio->stride_2;
3160*4882a593Smuzhiyun *data_ptr++ = selVal;
3161*4882a593Smuzhiyun *data_ptr++ = data;
3162*4882a593Smuzhiyun
3163*4882a593Smuzhiyun value1 = value1 + stride2;
3164*4882a593Smuzhiyun *d_ptr = data_ptr;
3165*4882a593Smuzhiyun }
3166*4882a593Smuzhiyun
3167*4882a593Smuzhiyun return 0;
3168*4882a593Smuzhiyun
3169*4882a593Smuzhiyun error:
3170*4882a593Smuzhiyun return -1;
3171*4882a593Smuzhiyun }
3172*4882a593Smuzhiyun
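/*
 * POLLWR entry: poll addr_1 until one of the poll bits is set, write value_2
 * to addr_2 and value_1 to addr_1, then poll addr_1 again. Nothing is added
 * to the dump buffer; this entry only sequences register writes.
 */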
3173*4882a593Smuzhiyun static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
3174*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3175*4882a593Smuzhiyun {
3176*4882a593Smuzhiyun uint32_t addr1, addr2, value1, value2, poll, r_value;
3177*4882a593Smuzhiyun uint32_t wait_count = 0;
3178*4882a593Smuzhiyun struct qla8044_minidump_entry_pollwr *pollwr_hdr;
3179*4882a593Smuzhiyun
3180*4882a593Smuzhiyun pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
3181*4882a593Smuzhiyun addr1 = pollwr_hdr->addr_1;
3182*4882a593Smuzhiyun addr2 = pollwr_hdr->addr_2;
3183*4882a593Smuzhiyun value1 = pollwr_hdr->value_1;
3184*4882a593Smuzhiyun value2 = pollwr_hdr->value_2;
3185*4882a593Smuzhiyun
3186*4882a593Smuzhiyun poll = pollwr_hdr->poll;
3187*4882a593Smuzhiyun
3188*4882a593Smuzhiyun while (wait_count < poll) {
3189*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr1, &r_value);
3190*4882a593Smuzhiyun
3191*4882a593Smuzhiyun if ((r_value & poll) != 0)
3192*4882a593Smuzhiyun break;
3193*4882a593Smuzhiyun wait_count++;
3194*4882a593Smuzhiyun }
3195*4882a593Smuzhiyun
3196*4882a593Smuzhiyun if (wait_count == poll) {
3197*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
3198*4882a593Smuzhiyun goto error;
3199*4882a593Smuzhiyun }
3200*4882a593Smuzhiyun
3201*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr2, value2);
3202*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, addr1, value1);
3203*4882a593Smuzhiyun
3204*4882a593Smuzhiyun wait_count = 0;
3205*4882a593Smuzhiyun while (wait_count < poll) {
3206*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, addr1, &r_value);
3207*4882a593Smuzhiyun
3208*4882a593Smuzhiyun if ((r_value & poll) != 0)
3209*4882a593Smuzhiyun break;
3210*4882a593Smuzhiyun wait_count++;
3211*4882a593Smuzhiyun }
3212*4882a593Smuzhiyun
3213*4882a593Smuzhiyun return QLA_SUCCESS;
3214*4882a593Smuzhiyun
3215*4882a593Smuzhiyun error:
3216*4882a593Smuzhiyun return -1;
3217*4882a593Smuzhiyun }
3218*4882a593Smuzhiyun
3219*4882a593Smuzhiyun /*
3220*4882a593Smuzhiyun *
3221*4882a593Smuzhiyun * qla8044_collect_md_data - Retrieve firmware minidump data.
3222*4882a593Smuzhiyun * @vha: pointer to the scsi_qla_host structure
3223*4882a593Smuzhiyun */
3224*4882a593Smuzhiyun int
3225*4882a593Smuzhiyun qla8044_collect_md_data(struct scsi_qla_host *vha)
3226*4882a593Smuzhiyun {
3227*4882a593Smuzhiyun int num_entry_hdr = 0;
3228*4882a593Smuzhiyun struct qla8044_minidump_entry_hdr *entry_hdr;
3229*4882a593Smuzhiyun struct qla8044_minidump_template_hdr *tmplt_hdr;
3230*4882a593Smuzhiyun uint32_t *data_ptr;
3231*4882a593Smuzhiyun uint32_t data_collected = 0, f_capture_mask;
3232*4882a593Smuzhiyun int i, rval = QLA_FUNCTION_FAILED;
3233*4882a593Smuzhiyun uint64_t now;
3234*4882a593Smuzhiyun uint32_t timestamp, idc_control;
3235*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
3236*4882a593Smuzhiyun
3237*4882a593Smuzhiyun if (!ha->md_dump) {
3238*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb101,
3239*4882a593Smuzhiyun "%s(%ld) No buffer to dump\n",
3240*4882a593Smuzhiyun __func__, vha->host_no);
3241*4882a593Smuzhiyun return rval;
3242*4882a593Smuzhiyun }
3243*4882a593Smuzhiyun
3244*4882a593Smuzhiyun if (ha->fw_dumped) {
3245*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb10d,
3246*4882a593Smuzhiyun "Firmware has been previously dumped (%p) "
3247*4882a593Smuzhiyun "-- ignoring request.\n", ha->fw_dump);
3248*4882a593Smuzhiyun goto md_failed;
3249*4882a593Smuzhiyun }
3250*4882a593Smuzhiyun
3251*4882a593Smuzhiyun ha->fw_dumped = false;
3252*4882a593Smuzhiyun
3253*4882a593Smuzhiyun if (!ha->md_tmplt_hdr || !ha->md_dump) {
3254*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb10e,
3255*4882a593Smuzhiyun "Memory not allocated for minidump capture\n");
3256*4882a593Smuzhiyun goto md_failed;
3257*4882a593Smuzhiyun }
3258*4882a593Smuzhiyun
3259*4882a593Smuzhiyun qla8044_idc_lock(ha);
3260*4882a593Smuzhiyun idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
3261*4882a593Smuzhiyun if (idc_control & GRACEFUL_RESET_BIT1) {
3262*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb112,
3263*4882a593Smuzhiyun "Forced reset from application, "
3264*4882a593Smuzhiyun "ignore minidump capture\n");
3265*4882a593Smuzhiyun qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
3266*4882a593Smuzhiyun (idc_control & ~GRACEFUL_RESET_BIT1));
3267*4882a593Smuzhiyun qla8044_idc_unlock(ha);
3268*4882a593Smuzhiyun
3269*4882a593Smuzhiyun goto md_failed;
3270*4882a593Smuzhiyun }
3271*4882a593Smuzhiyun qla8044_idc_unlock(ha);
3272*4882a593Smuzhiyun
3273*4882a593Smuzhiyun if (qla82xx_validate_template_chksum(vha)) {
3274*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb109,
3275*4882a593Smuzhiyun "Template checksum validation error\n");
3276*4882a593Smuzhiyun goto md_failed;
3277*4882a593Smuzhiyun }
3278*4882a593Smuzhiyun
3279*4882a593Smuzhiyun tmplt_hdr = (struct qla8044_minidump_template_hdr *)
3280*4882a593Smuzhiyun ha->md_tmplt_hdr;
3281*4882a593Smuzhiyun data_ptr = (uint32_t *)((uint8_t *)ha->md_dump);
3282*4882a593Smuzhiyun num_entry_hdr = tmplt_hdr->num_of_entries;
3283*4882a593Smuzhiyun
3284*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb11a,
3285*4882a593Smuzhiyun "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
3286*4882a593Smuzhiyun
3287*4882a593Smuzhiyun f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
3288*4882a593Smuzhiyun
3289*4882a593Smuzhiyun /* Validate whether required debug level is set */
3290*4882a593Smuzhiyun if ((f_capture_mask & 0x3) != 0x3) {
3291*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb10f,
3292*4882a593Smuzhiyun "Minimum required capture mask[0x%x] level not set\n",
3293*4882a593Smuzhiyun f_capture_mask);
3294*4882a593Smuzhiyun
3295*4882a593Smuzhiyun }
3296*4882a593Smuzhiyun tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
3297*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb102,
3298*4882a593Smuzhiyun "[%s]: starting data ptr: %p\n",
3299*4882a593Smuzhiyun __func__, data_ptr);
3300*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb10b,
3301*4882a593Smuzhiyun "[%s]: no of entry headers in Template: 0x%x\n",
3302*4882a593Smuzhiyun __func__, num_entry_hdr);
3303*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb10c,
3304*4882a593Smuzhiyun "[%s]: Total_data_size 0x%x, %d obtained\n",
3305*4882a593Smuzhiyun __func__, ha->md_dump_size, ha->md_dump_size);
3306*4882a593Smuzhiyun
3307*4882a593Smuzhiyun /* Update current timestamp before taking dump */
3308*4882a593Smuzhiyun now = get_jiffies_64();
3309*4882a593Smuzhiyun timestamp = (u32)(jiffies_to_msecs(now) / 1000);
3310*4882a593Smuzhiyun tmplt_hdr->driver_timestamp = timestamp;
3311*4882a593Smuzhiyun
3312*4882a593Smuzhiyun entry_hdr = (struct qla8044_minidump_entry_hdr *)
3313*4882a593Smuzhiyun (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
3314*4882a593Smuzhiyun tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
3315*4882a593Smuzhiyun tmplt_hdr->ocm_window_reg[ha->portnum];
3316*4882a593Smuzhiyun
3317*4882a593Smuzhiyun /* Walk through the entry headers - validate/perform required action */
3318*4882a593Smuzhiyun for (i = 0; i < num_entry_hdr; i++) {
3319*4882a593Smuzhiyun if (data_collected > ha->md_dump_size) {
3320*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb103,
3321*4882a593Smuzhiyun "Data collected: [0x%x], "
3322*4882a593Smuzhiyun "Total Dump size: [0x%x]\n",
3323*4882a593Smuzhiyun data_collected, ha->md_dump_size);
3324*4882a593Smuzhiyun return rval;
3325*4882a593Smuzhiyun }
3326*4882a593Smuzhiyun
3327*4882a593Smuzhiyun if (!(entry_hdr->d_ctrl.entry_capture_mask &
3328*4882a593Smuzhiyun ql2xmdcapmask)) {
3329*4882a593Smuzhiyun entry_hdr->d_ctrl.driver_flags |=
3330*4882a593Smuzhiyun QLA82XX_DBG_SKIPPED_FLAG;
3331*4882a593Smuzhiyun goto skip_nxt_entry;
3332*4882a593Smuzhiyun }
3333*4882a593Smuzhiyun
3334*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb104,
3335*4882a593Smuzhiyun "Data collected: [0x%x], Dump size left:[0x%x]\n",
3336*4882a593Smuzhiyun data_collected,
3337*4882a593Smuzhiyun (ha->md_dump_size - data_collected));
3338*4882a593Smuzhiyun
3339*4882a593Smuzhiyun /* Decode the entry type and take required action to capture
3340*4882a593Smuzhiyun * debug data
3341*4882a593Smuzhiyun */
3342*4882a593Smuzhiyun switch (entry_hdr->entry_type) {
3343*4882a593Smuzhiyun case QLA82XX_RDEND:
3344*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha, entry_hdr, i);
3345*4882a593Smuzhiyun break;
3346*4882a593Smuzhiyun case QLA82XX_CNTRL:
3347*4882a593Smuzhiyun rval = qla8044_minidump_process_control(vha,
3348*4882a593Smuzhiyun entry_hdr);
3349*4882a593Smuzhiyun if (rval != QLA_SUCCESS) {
3350*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha, entry_hdr, i);
3351*4882a593Smuzhiyun goto md_failed;
3352*4882a593Smuzhiyun }
3353*4882a593Smuzhiyun break;
3354*4882a593Smuzhiyun case QLA82XX_RDCRB:
3355*4882a593Smuzhiyun qla8044_minidump_process_rdcrb(vha,
3356*4882a593Smuzhiyun entry_hdr, &data_ptr);
3357*4882a593Smuzhiyun break;
3358*4882a593Smuzhiyun case QLA82XX_RDMEM:
3359*4882a593Smuzhiyun rval = qla8044_minidump_pex_dma_read(vha,
3360*4882a593Smuzhiyun entry_hdr, &data_ptr);
3361*4882a593Smuzhiyun if (rval != QLA_SUCCESS) {
3362*4882a593Smuzhiyun rval = qla8044_minidump_process_rdmem(vha,
3363*4882a593Smuzhiyun entry_hdr, &data_ptr);
3364*4882a593Smuzhiyun if (rval != QLA_SUCCESS) {
3365*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha,
3366*4882a593Smuzhiyun entry_hdr, i);
3367*4882a593Smuzhiyun goto md_failed;
3368*4882a593Smuzhiyun }
3369*4882a593Smuzhiyun }
3370*4882a593Smuzhiyun break;
3371*4882a593Smuzhiyun case QLA82XX_BOARD:
3372*4882a593Smuzhiyun case QLA82XX_RDROM:
3373*4882a593Smuzhiyun rval = qla8044_minidump_process_rdrom(vha,
3374*4882a593Smuzhiyun entry_hdr, &data_ptr);
3375*4882a593Smuzhiyun if (rval != QLA_SUCCESS) {
3376*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha,
3377*4882a593Smuzhiyun entry_hdr, i);
3378*4882a593Smuzhiyun }
3379*4882a593Smuzhiyun break;
3380*4882a593Smuzhiyun case QLA82XX_L2DTG:
3381*4882a593Smuzhiyun case QLA82XX_L2ITG:
3382*4882a593Smuzhiyun case QLA82XX_L2DAT:
3383*4882a593Smuzhiyun case QLA82XX_L2INS:
3384*4882a593Smuzhiyun rval = qla8044_minidump_process_l2tag(vha,
3385*4882a593Smuzhiyun entry_hdr, &data_ptr);
3386*4882a593Smuzhiyun if (rval != QLA_SUCCESS) {
3387*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha, entry_hdr, i);
3388*4882a593Smuzhiyun goto md_failed;
3389*4882a593Smuzhiyun }
3390*4882a593Smuzhiyun break;
3391*4882a593Smuzhiyun case QLA8044_L1DTG:
3392*4882a593Smuzhiyun case QLA8044_L1ITG:
3393*4882a593Smuzhiyun case QLA82XX_L1DAT:
3394*4882a593Smuzhiyun case QLA82XX_L1INS:
3395*4882a593Smuzhiyun qla8044_minidump_process_l1cache(vha,
3396*4882a593Smuzhiyun entry_hdr, &data_ptr);
3397*4882a593Smuzhiyun break;
3398*4882a593Smuzhiyun case QLA82XX_RDOCM:
3399*4882a593Smuzhiyun qla8044_minidump_process_rdocm(vha,
3400*4882a593Smuzhiyun entry_hdr, &data_ptr);
3401*4882a593Smuzhiyun break;
3402*4882a593Smuzhiyun case QLA82XX_RDMUX:
3403*4882a593Smuzhiyun qla8044_minidump_process_rdmux(vha,
3404*4882a593Smuzhiyun entry_hdr, &data_ptr);
3405*4882a593Smuzhiyun break;
3406*4882a593Smuzhiyun case QLA82XX_QUEUE:
3407*4882a593Smuzhiyun qla8044_minidump_process_queue(vha,
3408*4882a593Smuzhiyun entry_hdr, &data_ptr);
3409*4882a593Smuzhiyun break;
3410*4882a593Smuzhiyun case QLA8044_POLLRD:
3411*4882a593Smuzhiyun rval = qla8044_minidump_process_pollrd(vha,
3412*4882a593Smuzhiyun entry_hdr, &data_ptr);
3413*4882a593Smuzhiyun if (rval != QLA_SUCCESS)
3414*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha, entry_hdr, i);
3415*4882a593Smuzhiyun break;
3416*4882a593Smuzhiyun case QLA8044_RDMUX2:
3417*4882a593Smuzhiyun qla8044_minidump_process_rdmux2(vha,
3418*4882a593Smuzhiyun entry_hdr, &data_ptr);
3419*4882a593Smuzhiyun break;
3420*4882a593Smuzhiyun case QLA8044_POLLRDMWR:
3421*4882a593Smuzhiyun rval = qla8044_minidump_process_pollrdmwr(vha,
3422*4882a593Smuzhiyun entry_hdr, &data_ptr);
3423*4882a593Smuzhiyun if (rval != QLA_SUCCESS)
3424*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha, entry_hdr, i);
3425*4882a593Smuzhiyun break;
3426*4882a593Smuzhiyun case QLA8044_RDDFE:
3427*4882a593Smuzhiyun rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
3428*4882a593Smuzhiyun &data_ptr);
3429*4882a593Smuzhiyun if (rval != QLA_SUCCESS)
3430*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha, entry_hdr, i);
3431*4882a593Smuzhiyun break;
3432*4882a593Smuzhiyun case QLA8044_RDMDIO:
3433*4882a593Smuzhiyun rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
3434*4882a593Smuzhiyun &data_ptr);
3435*4882a593Smuzhiyun if (rval != QLA_SUCCESS)
3436*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha, entry_hdr, i);
3437*4882a593Smuzhiyun break;
3438*4882a593Smuzhiyun case QLA8044_POLLWR:
3439*4882a593Smuzhiyun rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
3440*4882a593Smuzhiyun &data_ptr);
3441*4882a593Smuzhiyun if (rval != QLA_SUCCESS)
3442*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha, entry_hdr, i);
3443*4882a593Smuzhiyun break;
3444*4882a593Smuzhiyun case QLA82XX_RDNOP:
3445*4882a593Smuzhiyun default:
3446*4882a593Smuzhiyun qla8044_mark_entry_skipped(vha, entry_hdr, i);
3447*4882a593Smuzhiyun break;
3448*4882a593Smuzhiyun }
3449*4882a593Smuzhiyun
3450*4882a593Smuzhiyun data_collected = (uint8_t *)data_ptr -
3451*4882a593Smuzhiyun (uint8_t *)ha->md_dump;
3452*4882a593Smuzhiyun skip_nxt_entry:
3453*4882a593Smuzhiyun /*
3454*4882a593Smuzhiyun * next entry in the template
3455*4882a593Smuzhiyun */
3456*4882a593Smuzhiyun entry_hdr = (struct qla8044_minidump_entry_hdr *)
3457*4882a593Smuzhiyun (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
3458*4882a593Smuzhiyun }
3459*4882a593Smuzhiyun
3460*4882a593Smuzhiyun if (data_collected != ha->md_dump_size) {
3461*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb105,
3462*4882a593Smuzhiyun "Dump data mismatch: Data collected: "
3463*4882a593Smuzhiyun "[0x%x], total_data_size:[0x%x]\n",
3464*4882a593Smuzhiyun data_collected, ha->md_dump_size);
3465*4882a593Smuzhiyun rval = QLA_FUNCTION_FAILED;
3466*4882a593Smuzhiyun goto md_failed;
3467*4882a593Smuzhiyun }
3468*4882a593Smuzhiyun
3469*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb110,
3470*4882a593Smuzhiyun "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
3471*4882a593Smuzhiyun vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
3472*4882a593Smuzhiyun ha->fw_dumped = true;
3473*4882a593Smuzhiyun qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
3474*4882a593Smuzhiyun
3475*4882a593Smuzhiyun
3476*4882a593Smuzhiyun ql_log(ql_log_info, vha, 0xb106,
3477*4882a593Smuzhiyun "Leaving fn: %s Last entry: 0x%x\n",
3478*4882a593Smuzhiyun __func__, i);
3479*4882a593Smuzhiyun md_failed:
3480*4882a593Smuzhiyun return rval;
3481*4882a593Smuzhiyun }
3482*4882a593Smuzhiyun
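/*
 * qla8044_get_minidump - Trigger minidump collection.
 *
 * @vha : Pointer to host adapter structure
 *
 * Calls qla8044_collect_md_data() and records the outcome in
 * ha->fw_dumped / ha->prev_minidump_failed.
 */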
3483*4882a593Smuzhiyun void
3484*4882a593Smuzhiyun qla8044_get_minidump(struct scsi_qla_host *vha)
3485*4882a593Smuzhiyun {
3486*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
3487*4882a593Smuzhiyun
3488*4882a593Smuzhiyun if (!qla8044_collect_md_data(vha)) {
3489*4882a593Smuzhiyun ha->fw_dumped = true;
3490*4882a593Smuzhiyun ha->prev_minidump_failed = 0;
3491*4882a593Smuzhiyun } else {
3492*4882a593Smuzhiyun ql_log(ql_log_fatal, vha, 0xb0db,
3493*4882a593Smuzhiyun "%s: Unable to collect minidump\n",
3494*4882a593Smuzhiyun __func__);
3495*4882a593Smuzhiyun ha->prev_minidump_failed = 1;
3496*4882a593Smuzhiyun }
3497*4882a593Smuzhiyun }
3498*4882a593Smuzhiyun
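/*
 * qla8044_poll_flash_status_reg - Poll FLASH_STATUS until the READY bit
 * is set, retrying up to QLA8044_FLASH_READ_RETRY_COUNT times with a
 * QLA8044_FLASH_STATUS_REG_POLL_DELAY msleep between reads.
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */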
3499*4882a593Smuzhiyun static int
3500*4882a593Smuzhiyun qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
3501*4882a593Smuzhiyun {
3502*4882a593Smuzhiyun uint32_t flash_status;
3503*4882a593Smuzhiyun int retries = QLA8044_FLASH_READ_RETRY_COUNT;
3504*4882a593Smuzhiyun int ret_val = QLA_SUCCESS;
3505*4882a593Smuzhiyun
3506*4882a593Smuzhiyun while (retries--) {
3507*4882a593Smuzhiyun ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
3508*4882a593Smuzhiyun &flash_status);
3509*4882a593Smuzhiyun if (ret_val) {
3510*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb13c,
3511*4882a593Smuzhiyun "%s: Failed to read FLASH_STATUS reg.\n",
3512*4882a593Smuzhiyun __func__);
3513*4882a593Smuzhiyun break;
3514*4882a593Smuzhiyun }
3515*4882a593Smuzhiyun if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
3516*4882a593Smuzhiyun QLA8044_FLASH_STATUS_READY)
3517*4882a593Smuzhiyun break;
3518*4882a593Smuzhiyun msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
3519*4882a593Smuzhiyun }
3520*4882a593Smuzhiyun
/* retries reaches -1 only when every attempt was used without READY */
3521*4882a593Smuzhiyun if (retries < 0)
3522*4882a593Smuzhiyun ret_val = QLA_FUNCTION_FAILED;
3523*4882a593Smuzhiyun
3524*4882a593Smuzhiyun return ret_val;
3525*4882a593Smuzhiyun }
3526*4882a593Smuzhiyun
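/*
 * qla8044_write_flash_status_reg - Write @data to the flash part's status
 * register using the FDT write-status command (fdt_wrt_sts_reg_cmd), then
 * wait for the flash to report ready again.
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */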
3527*4882a593Smuzhiyun static int
3528*4882a593Smuzhiyun qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
3529*4882a593Smuzhiyun uint32_t data)
3530*4882a593Smuzhiyun {
3531*4882a593Smuzhiyun int ret_val = QLA_SUCCESS;
3532*4882a593Smuzhiyun uint32_t cmd;
3533*4882a593Smuzhiyun
3534*4882a593Smuzhiyun cmd = vha->hw->fdt_wrt_sts_reg_cmd;
3535*4882a593Smuzhiyun
3536*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3537*4882a593Smuzhiyun QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
3538*4882a593Smuzhiyun if (ret_val) {
3539*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb125,
3540*4882a593Smuzhiyun "%s: Failed to write to FLASH_ADDR.\n", __func__);
3541*4882a593Smuzhiyun goto exit_func;
3542*4882a593Smuzhiyun }
3543*4882a593Smuzhiyun
3544*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
3545*4882a593Smuzhiyun if (ret_val) {
3546*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb126,
3547*4882a593Smuzhiyun "%s: Failed to write to FLASH_WRDATA.\n", __func__);
3548*4882a593Smuzhiyun goto exit_func;
3549*4882a593Smuzhiyun }
3550*4882a593Smuzhiyun
3551*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3552*4882a593Smuzhiyun QLA8044_FLASH_SECOND_ERASE_MS_VAL);
3553*4882a593Smuzhiyun if (ret_val) {
3554*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb127,
3555*4882a593Smuzhiyun "%s: Failed to write to FLASH_CONTROL.\n", __func__);
3556*4882a593Smuzhiyun goto exit_func;
3557*4882a593Smuzhiyun }
3558*4882a593Smuzhiyun
3559*4882a593Smuzhiyun ret_val = qla8044_poll_flash_status_reg(vha);
3560*4882a593Smuzhiyun if (ret_val)
3561*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb128,
3562*4882a593Smuzhiyun "%s: Error polling flash status reg.\n", __func__);
3563*4882a593Smuzhiyun
3564*4882a593Smuzhiyun exit_func:
3565*4882a593Smuzhiyun return ret_val;
3566*4882a593Smuzhiyun }
3567*4882a593Smuzhiyun
3568*4882a593Smuzhiyun /*
3569*4882a593Smuzhiyun * This function assumes that the flash lock is held.
3570*4882a593Smuzhiyun */
3571*4882a593Smuzhiyun static int
3572*4882a593Smuzhiyun qla8044_unprotect_flash(scsi_qla_host_t *vha)
3573*4882a593Smuzhiyun {
3574*4882a593Smuzhiyun int ret_val;
3575*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
3576*4882a593Smuzhiyun
3577*4882a593Smuzhiyun ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
3578*4882a593Smuzhiyun if (ret_val)
3579*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb139,
3580*4882a593Smuzhiyun "%s: Write flash status failed.\n", __func__);
3581*4882a593Smuzhiyun
3582*4882a593Smuzhiyun return ret_val;
3583*4882a593Smuzhiyun }
3584*4882a593Smuzhiyun
3585*4882a593Smuzhiyun /*
3586*4882a593Smuzhiyun * This function assumes that the flash lock is held.
3587*4882a593Smuzhiyun */
3588*4882a593Smuzhiyun static int
3589*4882a593Smuzhiyun qla8044_protect_flash(scsi_qla_host_t *vha)
3590*4882a593Smuzhiyun {
3591*4882a593Smuzhiyun int ret_val;
3592*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
3593*4882a593Smuzhiyun
3594*4882a593Smuzhiyun ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
3595*4882a593Smuzhiyun if (ret_val)
3596*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb13b,
3597*4882a593Smuzhiyun "%s: Write flash status failed.\n", __func__);
3598*4882a593Smuzhiyun
3599*4882a593Smuzhiyun return ret_val;
3600*4882a593Smuzhiyun }
3601*4882a593Smuzhiyun
3602*4882a593Smuzhiyun
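/*
 * qla8044_erase_flash_sector - Erase a single flash sector.
 *
 * @vha : Pointer to host adapter structure
 * @sector_start_addr : Byte address of the sector to erase
 *
 * The 24-bit sector address is byte-swapped into FLASH_WRDATA and the
 * FDT erase command is issued through FLASH_ADDR/FLASH_CONTROL.
 *
 * NOTE: Expected to be called with the flash lock held and the flash
 * unprotected, as done by qla8044_write_optrom_data().
 */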
3603*4882a593Smuzhiyun static int
3604*4882a593Smuzhiyun qla8044_erase_flash_sector(struct scsi_qla_host *vha,
3605*4882a593Smuzhiyun uint32_t sector_start_addr)
3606*4882a593Smuzhiyun {
3607*4882a593Smuzhiyun uint32_t reversed_addr;
3608*4882a593Smuzhiyun int ret_val = QLA_SUCCESS;
3609*4882a593Smuzhiyun
3610*4882a593Smuzhiyun ret_val = qla8044_poll_flash_status_reg(vha);
3611*4882a593Smuzhiyun if (ret_val) {
3612*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb12e,
3613*4882a593Smuzhiyun "%s: Poll flash status after erase failed.\n", __func__);
3614*4882a593Smuzhiyun }
3615*4882a593Smuzhiyun
3616*4882a593Smuzhiyun reversed_addr = (((sector_start_addr & 0xFF) << 16) |
3617*4882a593Smuzhiyun (sector_start_addr & 0xFF00) |
3618*4882a593Smuzhiyun ((sector_start_addr & 0xFF0000) >> 16));
3619*4882a593Smuzhiyun
3620*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha,
3621*4882a593Smuzhiyun QLA8044_FLASH_WRDATA, reversed_addr);
3622*4882a593Smuzhiyun if (ret_val) {
3623*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb12f,
3624*4882a593Smuzhiyun "%s: Failed to write to FLASH_WRDATA.\n", __func__);
3625*4882a593Smuzhiyun }
3626*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3627*4882a593Smuzhiyun QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
3628*4882a593Smuzhiyun if (ret_val) {
3629*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb130,
3630*4882a593Smuzhiyun "%s: Failed to write to FLASH_ADDR.\n", __func__);
3631*4882a593Smuzhiyun }
3632*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3633*4882a593Smuzhiyun QLA8044_FLASH_LAST_ERASE_MS_VAL);
3634*4882a593Smuzhiyun if (ret_val) {
3635*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb131,
3636*4882a593Smuzhiyun "%s: Failed write to FLASH_CONTROL.\n", __func__);
3637*4882a593Smuzhiyun }
3638*4882a593Smuzhiyun ret_val = qla8044_poll_flash_status_reg(vha);
3639*4882a593Smuzhiyun if (ret_val) {
3640*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb132,
3641*4882a593Smuzhiyun "%s: Poll flash status failed.\n", __func__);
3642*4882a593Smuzhiyun }
3643*4882a593Smuzhiyun
3644*4882a593Smuzhiyun
3645*4882a593Smuzhiyun return ret_val;
3646*4882a593Smuzhiyun }
3647*4882a593Smuzhiyun
3648*4882a593Smuzhiyun /*
3649*4882a593Smuzhiyun * qla8044_flash_write_u32 - Write data to flash
3650*4882a593Smuzhiyun *
3651*4882a593Smuzhiyun * @vha : Pointer to adapter structure
3652*4882a593Smuzhiyun * @addr : Flash address to write to
3653*4882a593Smuzhiyun * @p_data : Data to be written
3654*4882a593Smuzhiyun *
3655*4882a593Smuzhiyun * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
3656*4882a593Smuzhiyun *
3657*4882a593Smuzhiyun * NOTE: Lock should be held on entry
3658*4882a593Smuzhiyun */
3659*4882a593Smuzhiyun static int
3660*4882a593Smuzhiyun qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
3661*4882a593Smuzhiyun uint32_t *p_data)
3662*4882a593Smuzhiyun {
3663*4882a593Smuzhiyun int ret_val = QLA_SUCCESS;
3664*4882a593Smuzhiyun
3665*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3666*4882a593Smuzhiyun 0x00800000 | (addr >> 2));
3667*4882a593Smuzhiyun if (ret_val) {
3668*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb134,
3669*4882a593Smuzhiyun "%s: Failed write to FLASH_ADDR.\n", __func__);
3670*4882a593Smuzhiyun goto exit_func;
3671*4882a593Smuzhiyun }
3672*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
3673*4882a593Smuzhiyun if (ret_val) {
3674*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb135,
3675*4882a593Smuzhiyun "%s: Failed write to FLASH_WRDATA.\n", __func__);
3676*4882a593Smuzhiyun goto exit_func;
3677*4882a593Smuzhiyun }
3678*4882a593Smuzhiyun ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
3679*4882a593Smuzhiyun if (ret_val) {
3680*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb136,
3681*4882a593Smuzhiyun "%s: Failed write to FLASH_CONTROL.\n", __func__);
3682*4882a593Smuzhiyun goto exit_func;
3683*4882a593Smuzhiyun }
3684*4882a593Smuzhiyun ret_val = qla8044_poll_flash_status_reg(vha);
3685*4882a593Smuzhiyun if (ret_val) {
3686*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb137,
3687*4882a593Smuzhiyun "%s: Poll flash status failed.\n", __func__);
3688*4882a593Smuzhiyun }
3689*4882a593Smuzhiyun
3690*4882a593Smuzhiyun exit_func:
3691*4882a593Smuzhiyun return ret_val;
3692*4882a593Smuzhiyun }
3693*4882a593Smuzhiyun
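/*
 * qla8044_write_flash_buffer_mode - Program a burst of dwords using the
 * controller's buffered write sequence: the first, middle and last dwords
 * are each issued with their own FLASH_CONTROL pattern, and SPI_STATUS is
 * checked afterwards to detect a failed burst.
 *
 * @dwptr  : Data to be written
 * @faddr  : Flash byte address of the burst
 * @dwords : Burst length, between QLA8044_MIN_OPTROM_BURST_DWORDS and
 *           QLA8044_MAX_OPTROM_BURST_DWORDS
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */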
3694*4882a593Smuzhiyun static int
3695*4882a593Smuzhiyun qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
3696*4882a593Smuzhiyun uint32_t faddr, uint32_t dwords)
3697*4882a593Smuzhiyun {
3698*4882a593Smuzhiyun int ret = QLA_FUNCTION_FAILED;
3699*4882a593Smuzhiyun uint32_t spi_val;
3700*4882a593Smuzhiyun
3701*4882a593Smuzhiyun if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
3702*4882a593Smuzhiyun dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
3703*4882a593Smuzhiyun ql_dbg(ql_dbg_user, vha, 0xb123,
3704*4882a593Smuzhiyun "Got unsupported dwords = 0x%x.\n",
3705*4882a593Smuzhiyun dwords);
3706*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
3707*4882a593Smuzhiyun }
3708*4882a593Smuzhiyun
3709*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
3710*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3711*4882a593Smuzhiyun spi_val | QLA8044_FLASH_SPI_CTL);
3712*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3713*4882a593Smuzhiyun QLA8044_FLASH_FIRST_TEMP_VAL);
3714*4882a593Smuzhiyun
3715*4882a593Smuzhiyun /* First DWORD write to FLASH_WRDATA */
3716*4882a593Smuzhiyun ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
3717*4882a593Smuzhiyun *dwptr++);
3718*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3719*4882a593Smuzhiyun QLA8044_FLASH_FIRST_MS_PATTERN);
3720*4882a593Smuzhiyun
3721*4882a593Smuzhiyun ret = qla8044_poll_flash_status_reg(vha);
3722*4882a593Smuzhiyun if (ret) {
3723*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb124,
3724*4882a593Smuzhiyun "%s: Failed.\n", __func__);
3725*4882a593Smuzhiyun goto exit_func;
3726*4882a593Smuzhiyun }
3727*4882a593Smuzhiyun
3728*4882a593Smuzhiyun dwords--;
3729*4882a593Smuzhiyun
3730*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3731*4882a593Smuzhiyun QLA8044_FLASH_SECOND_TEMP_VAL);
3732*4882a593Smuzhiyun
3733*4882a593Smuzhiyun
3734*4882a593Smuzhiyun /* Second to N-1 DWORDS writes */
3735*4882a593Smuzhiyun while (dwords != 1) {
3736*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
3737*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3738*4882a593Smuzhiyun QLA8044_FLASH_SECOND_MS_PATTERN);
3739*4882a593Smuzhiyun ret = qla8044_poll_flash_status_reg(vha);
3740*4882a593Smuzhiyun if (ret) {
3741*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb129,
3742*4882a593Smuzhiyun "%s: Failed.\n", __func__);
3743*4882a593Smuzhiyun goto exit_func;
3744*4882a593Smuzhiyun }
3745*4882a593Smuzhiyun dwords--;
3746*4882a593Smuzhiyun }
3747*4882a593Smuzhiyun
3748*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3749*4882a593Smuzhiyun QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));
3750*4882a593Smuzhiyun
3751*4882a593Smuzhiyun /* Last DWORD write */
3752*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
3753*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3754*4882a593Smuzhiyun QLA8044_FLASH_LAST_MS_PATTERN);
3755*4882a593Smuzhiyun ret = qla8044_poll_flash_status_reg(vha);
3756*4882a593Smuzhiyun if (ret) {
3757*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb12a,
3758*4882a593Smuzhiyun "%s: Failed.\n", __func__);
3759*4882a593Smuzhiyun goto exit_func;
3760*4882a593Smuzhiyun }
3761*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);
3762*4882a593Smuzhiyun
3763*4882a593Smuzhiyun if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
3764*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb12b,
3765*4882a593Smuzhiyun "%s: Failed.\n", __func__);
3766*4882a593Smuzhiyun spi_val = 0;
3767*4882a593Smuzhiyun /* Operation failed, clear error bit. */
3768*4882a593Smuzhiyun qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3769*4882a593Smuzhiyun &spi_val);
3770*4882a593Smuzhiyun qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3771*4882a593Smuzhiyun spi_val | QLA8044_FLASH_SPI_CTL);
3772*4882a593Smuzhiyun }
3773*4882a593Smuzhiyun exit_func:
3774*4882a593Smuzhiyun return ret;
3775*4882a593Smuzhiyun }
3776*4882a593Smuzhiyun
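/*
 * qla8044_write_flash_dword_mode - Slow-path write: program the flash one
 * dword at a time via qla8044_flash_write_u32(). Used as a fallback when
 * buffer-mode programming fails.
 */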
3777*4882a593Smuzhiyun static int
3778*4882a593Smuzhiyun qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
3779*4882a593Smuzhiyun uint32_t faddr, uint32_t dwords)
3780*4882a593Smuzhiyun {
3781*4882a593Smuzhiyun int ret = QLA_FUNCTION_FAILED;
3782*4882a593Smuzhiyun uint32_t liter;
3783*4882a593Smuzhiyun
3784*4882a593Smuzhiyun for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3785*4882a593Smuzhiyun ret = qla8044_flash_write_u32(vha, faddr, dwptr);
3786*4882a593Smuzhiyun if (ret) {
3787*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb141,
3788*4882a593Smuzhiyun "%s: flash address=%x data=%x.\n", __func__,
3789*4882a593Smuzhiyun faddr, *dwptr);
3790*4882a593Smuzhiyun break;
3791*4882a593Smuzhiyun }
3792*4882a593Smuzhiyun }
3793*4882a593Smuzhiyun
3794*4882a593Smuzhiyun return ret;
3795*4882a593Smuzhiyun }
3796*4882a593Smuzhiyun
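/*
 * qla8044_write_optrom_data - Write option-ROM data to flash.
 *
 * @buf    : Source buffer
 * @offset : Flash byte offset (sector aligned)
 * @length : Number of bytes to write (sector aligned)
 *
 * Copies the caller's buffer, blocks SCSI requests, takes the flash lock,
 * unprotects the flash, erases the affected sectors and then programs the
 * data in bursts of QLA8044_MAX_OPTROM_BURST_DWORDS, falling back to
 * dword mode if a burst fails.
 */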
3797*4882a593Smuzhiyun int
3798*4882a593Smuzhiyun qla8044_write_optrom_data(struct scsi_qla_host *vha, void *buf,
3799*4882a593Smuzhiyun uint32_t offset, uint32_t length)
3800*4882a593Smuzhiyun {
3801*4882a593Smuzhiyun int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
3802*4882a593Smuzhiyun int dword_count, erase_sec_count;
3803*4882a593Smuzhiyun uint32_t erase_offset;
3804*4882a593Smuzhiyun uint8_t *p_cache, *p_src;
3805*4882a593Smuzhiyun
3806*4882a593Smuzhiyun erase_offset = offset;
3807*4882a593Smuzhiyun
3808*4882a593Smuzhiyun p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
3809*4882a593Smuzhiyun if (!p_cache)
3810*4882a593Smuzhiyun return QLA_FUNCTION_FAILED;
3811*4882a593Smuzhiyun
3812*4882a593Smuzhiyun memcpy(p_cache, buf, length);
3813*4882a593Smuzhiyun p_src = p_cache;
3814*4882a593Smuzhiyun dword_count = length / sizeof(uint32_t);
3815*4882a593Smuzhiyun /* Since the offset and length are sector aligned, dword_count is
3816*4882a593Smuzhiyun * always a multiple of QLA8044_MAX_OPTROM_BURST_DWORDS (64)
3817*4882a593Smuzhiyun */
3818*4882a593Smuzhiyun burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
3819*4882a593Smuzhiyun erase_sec_count = length / QLA8044_SECTOR_SIZE;
3820*4882a593Smuzhiyun
3821*4882a593Smuzhiyun /* Suspend HBA. */
3822*4882a593Smuzhiyun scsi_block_requests(vha->host);
3823*4882a593Smuzhiyun /* Lock and enable write for whole operation. */
3824*4882a593Smuzhiyun qla8044_flash_lock(vha);
3825*4882a593Smuzhiyun qla8044_unprotect_flash(vha);
3826*4882a593Smuzhiyun
3827*4882a593Smuzhiyun /* Erasing the sectors */
3828*4882a593Smuzhiyun for (i = 0; i < erase_sec_count; i++) {
3829*4882a593Smuzhiyun rval = qla8044_erase_flash_sector(vha, erase_offset);
3830*4882a593Smuzhiyun ql_dbg(ql_dbg_user, vha, 0xb138,
3831*4882a593Smuzhiyun "Done erase of sector=0x%x.\n",
3832*4882a593Smuzhiyun erase_offset);
3833*4882a593Smuzhiyun if (rval) {
3834*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb121,
3835*4882a593Smuzhiyun "Failed to erase the sector having address: "
3836*4882a593Smuzhiyun "0x%x.\n", erase_offset);
3837*4882a593Smuzhiyun goto out;
3838*4882a593Smuzhiyun }
3839*4882a593Smuzhiyun erase_offset += QLA8044_SECTOR_SIZE;
3840*4882a593Smuzhiyun }
3841*4882a593Smuzhiyun ql_dbg(ql_dbg_user, vha, 0xb13f,
3842*4882a593Smuzhiyun "Got write for addr = 0x%x length=0x%x.\n",
3843*4882a593Smuzhiyun offset, length);
3844*4882a593Smuzhiyun
3845*4882a593Smuzhiyun for (i = 0; i < burst_iter_count; i++) {
3846*4882a593Smuzhiyun
3847*4882a593Smuzhiyun /* Go with write. */
3848*4882a593Smuzhiyun rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
3849*4882a593Smuzhiyun offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
3850*4882a593Smuzhiyun if (rval) {
3851*4882a593Smuzhiyun /* Buffer mode failed, fall back to dword mode */
3852*4882a593Smuzhiyun ql_log(ql_log_warn, vha, 0xb122,
3853*4882a593Smuzhiyun "Failed to write flash in buffer mode, "
3854*4882a593Smuzhiyun "Reverting to slow-write.\n");
3855*4882a593Smuzhiyun rval = qla8044_write_flash_dword_mode(vha,
3856*4882a593Smuzhiyun (uint32_t *)p_src, offset,
3857*4882a593Smuzhiyun QLA8044_MAX_OPTROM_BURST_DWORDS);
3858*4882a593Smuzhiyun }
3859*4882a593Smuzhiyun p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3860*4882a593Smuzhiyun offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3861*4882a593Smuzhiyun }
3862*4882a593Smuzhiyun ql_dbg(ql_dbg_user, vha, 0xb133,
3863*4882a593Smuzhiyun "Done writing.\n");
3864*4882a593Smuzhiyun
3865*4882a593Smuzhiyun out:
3866*4882a593Smuzhiyun qla8044_protect_flash(vha);
3867*4882a593Smuzhiyun qla8044_flash_unlock(vha);
3868*4882a593Smuzhiyun scsi_unblock_requests(vha->host);
3869*4882a593Smuzhiyun kfree(p_cache);
3870*4882a593Smuzhiyun
3871*4882a593Smuzhiyun return rval;
3872*4882a593Smuzhiyun }
3873*4882a593Smuzhiyun
3874*4882a593Smuzhiyun #define LEG_INT_PTR_B31 (1 << 31)
3875*4882a593Smuzhiyun #define LEG_INT_PTR_B30 (1 << 30)
3876*4882a593Smuzhiyun #define PF_BITS_MASK (0xF << 16)
3877*4882a593Smuzhiyun /**
3878*4882a593Smuzhiyun * qla8044_intr_handler() - Process interrupts for the ISP8044
3879*4882a593Smuzhiyun * @irq: interrupt number
3880*4882a593Smuzhiyun * @dev_id: SCSI driver HA context
3881*4882a593Smuzhiyun *
3882*4882a593Smuzhiyun * Called by system whenever the host adapter generates an interrupt.
3883*4882a593Smuzhiyun *
3884*4882a593Smuzhiyun * Returns handled flag.
3885*4882a593Smuzhiyun */
3886*4882a593Smuzhiyun irqreturn_t
3887*4882a593Smuzhiyun qla8044_intr_handler(int irq, void *dev_id)
3888*4882a593Smuzhiyun {
3889*4882a593Smuzhiyun scsi_qla_host_t *vha;
3890*4882a593Smuzhiyun struct qla_hw_data *ha;
3891*4882a593Smuzhiyun struct rsp_que *rsp;
3892*4882a593Smuzhiyun struct device_reg_82xx __iomem *reg;
3893*4882a593Smuzhiyun int status = 0;
3894*4882a593Smuzhiyun unsigned long flags;
3895*4882a593Smuzhiyun unsigned long iter;
3896*4882a593Smuzhiyun uint32_t stat;
3897*4882a593Smuzhiyun uint16_t mb[8];
3898*4882a593Smuzhiyun uint32_t leg_int_ptr = 0, pf_bit;
3899*4882a593Smuzhiyun
3900*4882a593Smuzhiyun rsp = (struct rsp_que *) dev_id;
3901*4882a593Smuzhiyun if (!rsp) {
3902*4882a593Smuzhiyun ql_log(ql_log_info, NULL, 0xb143,
3903*4882a593Smuzhiyun "%s(): NULL response queue pointer\n", __func__);
3904*4882a593Smuzhiyun return IRQ_NONE;
3905*4882a593Smuzhiyun }
3906*4882a593Smuzhiyun ha = rsp->hw;
3907*4882a593Smuzhiyun vha = pci_get_drvdata(ha->pdev);
3908*4882a593Smuzhiyun
3909*4882a593Smuzhiyun if (unlikely(pci_channel_offline(ha->pdev)))
3910*4882a593Smuzhiyun return IRQ_HANDLED;
3911*4882a593Smuzhiyun
3912*4882a593Smuzhiyun leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
3913*4882a593Smuzhiyun
3914*4882a593Smuzhiyun /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
3915*4882a593Smuzhiyun if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
3916*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb144,
3917*4882a593Smuzhiyun "%s: Legacy Interrupt Bit 31 not set, "
3918*4882a593Smuzhiyun "spurious interrupt!\n", __func__);
3919*4882a593Smuzhiyun return IRQ_NONE;
3920*4882a593Smuzhiyun }
3921*4882a593Smuzhiyun
3922*4882a593Smuzhiyun pf_bit = ha->portnum << 16;
3923*4882a593Smuzhiyun /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
3924*4882a593Smuzhiyun if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
3925*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb145,
3926*4882a593Smuzhiyun "%s: Incorrect function ID 0x%x in "
3927*4882a593Smuzhiyun "legacy interrupt register, "
3928*4882a593Smuzhiyun "ha->pf_bit = 0x%x\n", __func__,
3929*4882a593Smuzhiyun (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
3930*4882a593Smuzhiyun return IRQ_NONE;
3931*4882a593Smuzhiyun }
3932*4882a593Smuzhiyun
3933*4882a593Smuzhiyun /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
3934*4882a593Smuzhiyun * Control register and poll till Legacy Interrupt Pointer register
3935*4882a593Smuzhiyun * bit 30 is 0.
3936*4882a593Smuzhiyun */
3937*4882a593Smuzhiyun qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
3938*4882a593Smuzhiyun do {
3939*4882a593Smuzhiyun leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
3940*4882a593Smuzhiyun if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
3941*4882a593Smuzhiyun break;
3942*4882a593Smuzhiyun } while (leg_int_ptr & (LEG_INT_PTR_B30));
3943*4882a593Smuzhiyun
3944*4882a593Smuzhiyun reg = &ha->iobase->isp82;
3945*4882a593Smuzhiyun spin_lock_irqsave(&ha->hardware_lock, flags);
3946*4882a593Smuzhiyun for (iter = 1; iter--; ) {
3947*4882a593Smuzhiyun
3948*4882a593Smuzhiyun if (rd_reg_dword(&reg->host_int)) {
3949*4882a593Smuzhiyun stat = rd_reg_dword(&reg->host_status);
3950*4882a593Smuzhiyun if ((stat & HSRX_RISC_INT) == 0)
3951*4882a593Smuzhiyun break;
3952*4882a593Smuzhiyun
3953*4882a593Smuzhiyun switch (stat & 0xff) {
3954*4882a593Smuzhiyun case 0x1:
3955*4882a593Smuzhiyun case 0x2:
3956*4882a593Smuzhiyun case 0x10:
3957*4882a593Smuzhiyun case 0x11:
3958*4882a593Smuzhiyun qla82xx_mbx_completion(vha, MSW(stat));
3959*4882a593Smuzhiyun status |= MBX_INTERRUPT;
3960*4882a593Smuzhiyun break;
3961*4882a593Smuzhiyun case 0x12:
3962*4882a593Smuzhiyun mb[0] = MSW(stat);
3963*4882a593Smuzhiyun mb[1] = rd_reg_word(&reg->mailbox_out[1]);
3964*4882a593Smuzhiyun mb[2] = rd_reg_word(&reg->mailbox_out[2]);
3965*4882a593Smuzhiyun mb[3] = rd_reg_word(&reg->mailbox_out[3]);
3966*4882a593Smuzhiyun qla2x00_async_event(vha, rsp, mb);
3967*4882a593Smuzhiyun break;
3968*4882a593Smuzhiyun case 0x13:
3969*4882a593Smuzhiyun qla24xx_process_response_queue(vha, rsp);
3970*4882a593Smuzhiyun break;
3971*4882a593Smuzhiyun default:
3972*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb146,
3973*4882a593Smuzhiyun "Unrecognized interrupt type "
3974*4882a593Smuzhiyun "(%d).\n", stat & 0xff);
3975*4882a593Smuzhiyun break;
3976*4882a593Smuzhiyun }
3977*4882a593Smuzhiyun }
3978*4882a593Smuzhiyun wrt_reg_dword(&reg->host_int, 0);
3979*4882a593Smuzhiyun }
3980*4882a593Smuzhiyun
3981*4882a593Smuzhiyun qla2x00_handle_mbx_completion(ha, status);
3982*4882a593Smuzhiyun spin_unlock_irqrestore(&ha->hardware_lock, flags);
3983*4882a593Smuzhiyun
3984*4882a593Smuzhiyun return IRQ_HANDLED;
3985*4882a593Smuzhiyun }
3986*4882a593Smuzhiyun
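/*
 * qla8044_idc_dontreset - Return non-zero if DONTRESET_BIT0 is set in the
 * IDC driver control register, i.e. reset recovery is disabled.
 */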
3987*4882a593Smuzhiyun static int
3988*4882a593Smuzhiyun qla8044_idc_dontreset(struct qla_hw_data *ha)
3989*4882a593Smuzhiyun {
3990*4882a593Smuzhiyun uint32_t idc_ctrl;
3991*4882a593Smuzhiyun
3992*4882a593Smuzhiyun idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
3993*4882a593Smuzhiyun return idc_ctrl & DONTRESET_BIT0;
3994*4882a593Smuzhiyun }
3995*4882a593Smuzhiyun
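/*
 * qla8044_clear_rst_ready - Clear this function's bit in the
 * CRB_DRV_STATE (reset-ready) register.
 */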
3996*4882a593Smuzhiyun static void
3997*4882a593Smuzhiyun qla8044_clear_rst_ready(scsi_qla_host_t *vha)
3998*4882a593Smuzhiyun {
3999*4882a593Smuzhiyun uint32_t drv_state;
4000*4882a593Smuzhiyun
4001*4882a593Smuzhiyun drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
4002*4882a593Smuzhiyun
4003*4882a593Smuzhiyun /*
4004*4882a593Smuzhiyun * For ISP8044, drv_active register has 1 bit per function,
4005*4882a593Smuzhiyun * shift 1 by func_num to set a bit for the function.
4006*4882a593Smuzhiyun * For ISP82xx, drv_active has 4 bits per function
4007*4882a593Smuzhiyun */
4008*4882a593Smuzhiyun drv_state &= ~(1 << vha->hw->portnum);
4009*4882a593Smuzhiyun
4010*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb13d,
4011*4882a593Smuzhiyun "drv_state: 0x%08x\n", drv_state);
4012*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
4013*4882a593Smuzhiyun }
4014*4882a593Smuzhiyun
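/*
 * qla8044_abort_isp - Recover the ISP8044 after a firmware failure.
 *
 * Moves the device state to NEED_RESET (unless the IDC "don't reset"
 * bit disallows it), lets qla8044_device_state_handler() drive the IDC
 * reset sequence and restarts the ISP on success.
 *
 * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
 */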
4015*4882a593Smuzhiyun int
4016*4882a593Smuzhiyun qla8044_abort_isp(scsi_qla_host_t *vha)
4017*4882a593Smuzhiyun {
4018*4882a593Smuzhiyun int rval;
4019*4882a593Smuzhiyun uint32_t dev_state;
4020*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
4021*4882a593Smuzhiyun
4022*4882a593Smuzhiyun qla8044_idc_lock(ha);
4023*4882a593Smuzhiyun dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
4024*4882a593Smuzhiyun
4025*4882a593Smuzhiyun if (ql2xdontresethba)
4026*4882a593Smuzhiyun qla8044_set_idc_dontreset(vha);
4027*4882a593Smuzhiyun
4028*4882a593Smuzhiyun /* If device_state is NEED_RESET, go ahead with
4029*4882a593Smuzhiyun * Reset, irrespective of ql2xdontresethba. This is to allow a
4030*4882a593Smuzhiyun * non-reset-owner to force a reset. Non-reset-owner sets
4031*4882a593Smuzhiyun * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
4032*4882a593Smuzhiyun * and then forces a Reset by setting device_state to
4033*4882a593Smuzhiyun * NEED_RESET. */
4034*4882a593Smuzhiyun if (dev_state == QLA8XXX_DEV_READY) {
4035*4882a593Smuzhiyun /* If IDC_CTRL DONTRESETHBA_BIT0 is set don't do reset
4036*4882a593Smuzhiyun * recovery */
4037*4882a593Smuzhiyun if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
4038*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb13e,
4039*4882a593Smuzhiyun "Reset recovery disabled\n");
4040*4882a593Smuzhiyun rval = QLA_FUNCTION_FAILED;
4041*4882a593Smuzhiyun goto exit_isp_reset;
4042*4882a593Smuzhiyun }
4043*4882a593Smuzhiyun
4044*4882a593Smuzhiyun ql_dbg(ql_dbg_p3p, vha, 0xb140,
4045*4882a593Smuzhiyun "HW State: NEED RESET\n");
4046*4882a593Smuzhiyun qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
4047*4882a593Smuzhiyun QLA8XXX_DEV_NEED_RESET);
4048*4882a593Smuzhiyun }
4049*4882a593Smuzhiyun
4050*4882a593Smuzhiyun /* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority
4051*4882a593Smuzhiyun * and which drivers are present. Unlike ISP82XX, the function setting
4052*4882a593Smuzhiyun * NEED_RESET, may not be the Reset owner. */
4053*4882a593Smuzhiyun qla83xx_reset_ownership(vha);
4054*4882a593Smuzhiyun
4055*4882a593Smuzhiyun qla8044_idc_unlock(ha);
4056*4882a593Smuzhiyun rval = qla8044_device_state_handler(vha);
4057*4882a593Smuzhiyun qla8044_idc_lock(ha);
4058*4882a593Smuzhiyun qla8044_clear_rst_ready(vha);
4059*4882a593Smuzhiyun
4060*4882a593Smuzhiyun exit_isp_reset:
4061*4882a593Smuzhiyun qla8044_idc_unlock(ha);
4062*4882a593Smuzhiyun if (rval == QLA_SUCCESS) {
4063*4882a593Smuzhiyun ha->flags.isp82xx_fw_hung = 0;
4064*4882a593Smuzhiyun ha->flags.nic_core_reset_hdlr_active = 0;
4065*4882a593Smuzhiyun rval = qla82xx_restart_isp(vha);
4066*4882a593Smuzhiyun }
4067*4882a593Smuzhiyun
4068*4882a593Smuzhiyun return rval;
4069*4882a593Smuzhiyun }
4070*4882a593Smuzhiyun
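/*
 * qla8044_fw_dump - Initiate a firmware dump by forcing a chip reset.
 *
 * No-op unless allow_cna_fw_dump is set; blocks SCSI requests, takes
 * reset ownership and waits for the chip reset to complete.
 */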
4071*4882a593Smuzhiyun void
4072*4882a593Smuzhiyun qla8044_fw_dump(scsi_qla_host_t *vha)
4073*4882a593Smuzhiyun {
4074*4882a593Smuzhiyun struct qla_hw_data *ha = vha->hw;
4075*4882a593Smuzhiyun
4076*4882a593Smuzhiyun if (!ha->allow_cna_fw_dump)
4077*4882a593Smuzhiyun return;
4078*4882a593Smuzhiyun
4079*4882a593Smuzhiyun scsi_block_requests(vha->host);
4080*4882a593Smuzhiyun ha->flags.isp82xx_no_md_cap = 1;
4081*4882a593Smuzhiyun qla8044_idc_lock(ha);
4082*4882a593Smuzhiyun qla82xx_set_reset_owner(vha);
4083*4882a593Smuzhiyun qla8044_idc_unlock(ha);
4084*4882a593Smuzhiyun qla2x00_wait_for_chip_reset(vha);
4085*4882a593Smuzhiyun scsi_unblock_requests(vha->host);
4086*4882a593Smuzhiyun }
4087