// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

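/* Stop a vport's timer; the base port (vp_idx 0) keeps its timer running. */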
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

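/*
 * Claim the first free slot in the vp_idx map and link the new vport into
 * the hw vp_list and the target-mode vp map.  Returns a vp_id greater than
 * max_npiv_vports when no slot is available.
 */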
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

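/*
 * Reverse of qla24xx_allocate_vp_id(): wait for in-flight references to the
 * vport to drain, unlink it from the vp_list, and release its vp_id bit.
 */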
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u8 i;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->vref_waitq,
		    !atomic_read(&vha->vref_count), HZ) > 0)
			break;
	}

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

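/* Look up a vport by world-wide port name under the vport_slock. */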
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual host adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable, or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

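/*
 * Disable a vport: log out its ports via the firmware (if running), mark
 * the loop down and all devices lost, and move the fc_vport to DISABLED
 * (or FAILED, returning -1, if the firmware command did not succeed).
 */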
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	qla2x00_mark_all_devices_lost(vha);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

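/*
 * Enable a vport: requires the physical port to be up on a fabric
 * topology; pushes the vport configuration to the firmware.
 */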
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
		atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
		!(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

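/*
 * Complete configuration of a newly acquired vport: enable RSCN reception
 * for it (change request #3), bring it online, and mark it active.
 */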
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

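/*
 * Fan a mailbox async event out to the vports.  Loop-level events go to
 * every vport; PORT_UPDATE/RSCN events only to the vport addressed in mb[3].
 */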
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
				continue;

			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

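/*
 * Per-vport part of ISP abort recovery: log the vport out (unless an ISP
 * reset is already active), treat the event as a loop down, then re-enable
 * the vport.
 */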
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/*
	 * To exclusively reset vport, we need to log it out first.
	 * Note: This control_vp can fail if ISP reset is already
	 * issued, this is expected, as the vp would be already
	 * logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->logout_on_delete = 0;
	}

	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);

	return qla24xx_enable_vp(vha);
}

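/*
 * Per-vport DPC work: vport configuration, PUREX processing, fcport
 * updates, relogin, reset marker, and loop resync.
 */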
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
		if (atomic_read(&vha->loop_state) == LOOP_READY) {
			qla24xx_process_purex_list(&vha->purex_list);
			clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

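/*
 * Run qla2x00_do_dpc_vp() for every vport on the physical port, holding a
 * vref on each vport across the call.
 */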
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

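/*
 * Validate an fc_vport create request: initiator role only, NPIV support
 * in firmware and on the switch, a unique WWPN, and room for another vport.
 */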
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against the max-NPIV-vports limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

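/*
 * Allocate and initialize a new vport host: assign a vp_id, set up SCSI
 * host limits, and start the vport timer.  Returns NULL on failure.
 */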
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;
	ha->dpc_active = 0;
	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

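/*
 * Free a request queue's DMA ring and outstanding-command array, and
 * release its slot in the request-queue id map.
 */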
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
	req = NULL;
}

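/*
 * Free a response queue's MSI-X vector (if claimed) and DMA ring, and
 * release its slot in the response-queue id map.
 */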
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

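/*
 * Tear down a request queue: re-issue the firmware queue-init command with
 * BIT_0 set in the options (queue delete), then free host-side resources.
 */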
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

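/* Tear down a response queue; counterpart of qla25xx_delete_req_que(). */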
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}

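/*
 * Create an additional request queue: allocate the ring and command array,
 * claim a queue id, wire up the queue registers, and optionally initialize
 * the queue in firmware (startqp).  Returns the queue id, or 0 on failure.
 */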
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (uint16_t *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

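/* Workqueue handler: drain a qpair's response queue under its lock. */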
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha = qpair->vha;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters that lack NACK capability */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
		ha->flags.disable_msix_handshake ?
		QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}

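/* Completion callback for the control-VP srb: just signal the waiter. */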
static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
	if (sp->comp)
		complete(sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable or disable a virtual port for a given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent to enable/disable the virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int	vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		return rval;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->comp = &comp;
	sp->done = qla_ctrlvp_sp_done;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&comp);
	sp->comp = NULL;

	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}
done:
	sp->free(sp);
	return rval;
}