xref: /OK3568_Linux_fs/kernel/drivers/scsi/ibmvscsi/ibmvfc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright (C) IBM Corporation, 2008
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/moduleparam.h>
12*4882a593Smuzhiyun #include <linux/dma-mapping.h>
13*4882a593Smuzhiyun #include <linux/dmapool.h>
14*4882a593Smuzhiyun #include <linux/delay.h>
15*4882a593Smuzhiyun #include <linux/interrupt.h>
16*4882a593Smuzhiyun #include <linux/kthread.h>
17*4882a593Smuzhiyun #include <linux/slab.h>
18*4882a593Smuzhiyun #include <linux/of.h>
19*4882a593Smuzhiyun #include <linux/pm.h>
20*4882a593Smuzhiyun #include <linux/stringify.h>
21*4882a593Smuzhiyun #include <linux/bsg-lib.h>
22*4882a593Smuzhiyun #include <asm/firmware.h>
23*4882a593Smuzhiyun #include <asm/irq.h>
24*4882a593Smuzhiyun #include <asm/vio.h>
25*4882a593Smuzhiyun #include <scsi/scsi.h>
26*4882a593Smuzhiyun #include <scsi/scsi_cmnd.h>
27*4882a593Smuzhiyun #include <scsi/scsi_host.h>
28*4882a593Smuzhiyun #include <scsi/scsi_device.h>
29*4882a593Smuzhiyun #include <scsi/scsi_tcq.h>
30*4882a593Smuzhiyun #include <scsi/scsi_transport_fc.h>
31*4882a593Smuzhiyun #include <scsi/scsi_bsg_fc.h>
32*4882a593Smuzhiyun #include "ibmvfc.h"
33*4882a593Smuzhiyun 
/* Driver tunables; default values come from ibmvfc.h. Those registered
 * with S_IWUSR below are also writable at runtime via sysfs. */
static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
/* Driver-wide host list and the lock presumably guarding it — confirm at use sites */
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
80*4882a593Smuzhiyun 
/*
 * Mapping from (status, error) pairs reported by the virtual adapter to the
 * SCSI midlayer result, a retry hint, a log hint, and a description string.
 * Looked up by ibmvfc_get_err_index(), which matches an entry when
 * (entry.status & status) == entry.status and entry.error == error.
 */
static const struct {
	u16 status;	/* error class (fabric mapped / VIOS / FC / FC SCSI) */
	u16 error;	/* specific error code within the class */
	u8 result;	/* SCSI host byte (DID_*) to return for this error */
	u8 retry;	/* non-zero if the command may be retried */
	int log;	/* non-zero if this error should be logged */
	char *name;	/* human-readable description */
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
130*4882a593Smuzhiyun 
/* Forward declarations for login/discovery job steps defined later in
 * this file and referenced from the code below. */
static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

/* Fallback description when a lookup in cmd_status finds no match */
static const char *unknown_error = "unknown error";
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun #ifdef CONFIG_SCSI_IBMVFC_TRACE
142*4882a593Smuzhiyun /**
143*4882a593Smuzhiyun  * ibmvfc_trc_start - Log a start trace entry
144*4882a593Smuzhiyun  * @evt:		ibmvfc event struct
145*4882a593Smuzhiyun  *
146*4882a593Smuzhiyun  **/
ibmvfc_trc_start(struct ibmvfc_event * evt)147*4882a593Smuzhiyun static void ibmvfc_trc_start(struct ibmvfc_event *evt)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = evt->vhost;
150*4882a593Smuzhiyun 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
151*4882a593Smuzhiyun 	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
152*4882a593Smuzhiyun 	struct ibmvfc_trace_entry *entry;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	entry = &vhost->trace[vhost->trace_index++];
155*4882a593Smuzhiyun 	entry->evt = evt;
156*4882a593Smuzhiyun 	entry->time = jiffies;
157*4882a593Smuzhiyun 	entry->fmt = evt->crq.format;
158*4882a593Smuzhiyun 	entry->type = IBMVFC_TRC_START;
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	switch (entry->fmt) {
161*4882a593Smuzhiyun 	case IBMVFC_CMD_FORMAT:
162*4882a593Smuzhiyun 		entry->op_code = vfc_cmd->iu.cdb[0];
163*4882a593Smuzhiyun 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
164*4882a593Smuzhiyun 		entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
165*4882a593Smuzhiyun 		entry->tmf_flags = vfc_cmd->iu.tmf_flags;
166*4882a593Smuzhiyun 		entry->u.start.xfer_len = be32_to_cpu(vfc_cmd->iu.xfer_len);
167*4882a593Smuzhiyun 		break;
168*4882a593Smuzhiyun 	case IBMVFC_MAD_FORMAT:
169*4882a593Smuzhiyun 		entry->op_code = be32_to_cpu(mad->opcode);
170*4882a593Smuzhiyun 		break;
171*4882a593Smuzhiyun 	default:
172*4882a593Smuzhiyun 		break;
173*4882a593Smuzhiyun 	}
174*4882a593Smuzhiyun }
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun /**
177*4882a593Smuzhiyun  * ibmvfc_trc_end - Log an end trace entry
178*4882a593Smuzhiyun  * @evt:		ibmvfc event struct
179*4882a593Smuzhiyun  *
180*4882a593Smuzhiyun  **/
ibmvfc_trc_end(struct ibmvfc_event * evt)181*4882a593Smuzhiyun static void ibmvfc_trc_end(struct ibmvfc_event *evt)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = evt->vhost;
184*4882a593Smuzhiyun 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
185*4882a593Smuzhiyun 	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
186*4882a593Smuzhiyun 	struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	entry->evt = evt;
189*4882a593Smuzhiyun 	entry->time = jiffies;
190*4882a593Smuzhiyun 	entry->fmt = evt->crq.format;
191*4882a593Smuzhiyun 	entry->type = IBMVFC_TRC_END;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	switch (entry->fmt) {
194*4882a593Smuzhiyun 	case IBMVFC_CMD_FORMAT:
195*4882a593Smuzhiyun 		entry->op_code = vfc_cmd->iu.cdb[0];
196*4882a593Smuzhiyun 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
197*4882a593Smuzhiyun 		entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
198*4882a593Smuzhiyun 		entry->tmf_flags = vfc_cmd->iu.tmf_flags;
199*4882a593Smuzhiyun 		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
200*4882a593Smuzhiyun 		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
201*4882a593Smuzhiyun 		entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
202*4882a593Smuzhiyun 		entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
203*4882a593Smuzhiyun 		entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
204*4882a593Smuzhiyun 		break;
205*4882a593Smuzhiyun 	case IBMVFC_MAD_FORMAT:
206*4882a593Smuzhiyun 		entry->op_code = be32_to_cpu(mad->opcode);
207*4882a593Smuzhiyun 		entry->u.end.status = be16_to_cpu(mad->status);
208*4882a593Smuzhiyun 		break;
209*4882a593Smuzhiyun 	default:
210*4882a593Smuzhiyun 		break;
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	}
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun #else
216*4882a593Smuzhiyun #define ibmvfc_trc_start(evt) do { } while (0)
217*4882a593Smuzhiyun #define ibmvfc_trc_end(evt) do { } while (0)
218*4882a593Smuzhiyun #endif
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun /**
221*4882a593Smuzhiyun  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
222*4882a593Smuzhiyun  * @status:		status / error class
223*4882a593Smuzhiyun  * @error:		error
224*4882a593Smuzhiyun  *
225*4882a593Smuzhiyun  * Return value:
226*4882a593Smuzhiyun  *	index into cmd_status / -EINVAL on failure
227*4882a593Smuzhiyun  **/
ibmvfc_get_err_index(u16 status,u16 error)228*4882a593Smuzhiyun static int ibmvfc_get_err_index(u16 status, u16 error)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun 	int i;
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
233*4882a593Smuzhiyun 		if ((cmd_status[i].status & status) == cmd_status[i].status &&
234*4882a593Smuzhiyun 		    cmd_status[i].error == error)
235*4882a593Smuzhiyun 			return i;
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 	return -EINVAL;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun /**
241*4882a593Smuzhiyun  * ibmvfc_get_cmd_error - Find the error description for the fcp response
242*4882a593Smuzhiyun  * @status:		status / error class
243*4882a593Smuzhiyun  * @error:		error
244*4882a593Smuzhiyun  *
245*4882a593Smuzhiyun  * Return value:
246*4882a593Smuzhiyun  *	error description string
247*4882a593Smuzhiyun  **/
ibmvfc_get_cmd_error(u16 status,u16 error)248*4882a593Smuzhiyun static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
249*4882a593Smuzhiyun {
250*4882a593Smuzhiyun 	int rc = ibmvfc_get_err_index(status, error);
251*4882a593Smuzhiyun 	if (rc >= 0)
252*4882a593Smuzhiyun 		return cmd_status[rc].name;
253*4882a593Smuzhiyun 	return unknown_error;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun /**
257*4882a593Smuzhiyun  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
258*4882a593Smuzhiyun  * @vfc_cmd:	ibmvfc command struct
259*4882a593Smuzhiyun  *
260*4882a593Smuzhiyun  * Return value:
261*4882a593Smuzhiyun  *	SCSI result value to return for completed command
262*4882a593Smuzhiyun  **/
ibmvfc_get_err_result(struct ibmvfc_cmd * vfc_cmd)263*4882a593Smuzhiyun static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun 	int err;
266*4882a593Smuzhiyun 	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
267*4882a593Smuzhiyun 	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
270*4882a593Smuzhiyun 	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
271*4882a593Smuzhiyun 	     rsp->data.info.rsp_code))
272*4882a593Smuzhiyun 		return DID_ERROR << 16;
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
275*4882a593Smuzhiyun 	if (err >= 0)
276*4882a593Smuzhiyun 		return rsp->scsi_status | (cmd_status[err].result << 16);
277*4882a593Smuzhiyun 	return rsp->scsi_status | (DID_ERROR << 16);
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun /**
281*4882a593Smuzhiyun  * ibmvfc_retry_cmd - Determine if error status is retryable
282*4882a593Smuzhiyun  * @status:		status / error class
283*4882a593Smuzhiyun  * @error:		error
284*4882a593Smuzhiyun  *
285*4882a593Smuzhiyun  * Return value:
286*4882a593Smuzhiyun  *	1 if error should be retried / 0 if it should not
287*4882a593Smuzhiyun  **/
ibmvfc_retry_cmd(u16 status,u16 error)288*4882a593Smuzhiyun static int ibmvfc_retry_cmd(u16 status, u16 error)
289*4882a593Smuzhiyun {
290*4882a593Smuzhiyun 	int rc = ibmvfc_get_err_index(status, error);
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	if (rc >= 0)
293*4882a593Smuzhiyun 		return cmd_status[rc].retry;
294*4882a593Smuzhiyun 	return 1;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun 
/* Fallback description when an FC explain code is not in either table below */
static const char *unknown_fc_explain = "unknown fc explain";

/* FC extended link service (ELS) reject explain codes → description */
static const struct {
	u16 fc_explain;	/* explain code from the reject */
	char *name;	/* human-readable description */
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};

/* FC generic services (CT) reject explain codes → description */
static const struct {
	u16 fc_explain;	/* explain code from the reject */
	char *name;	/* human-readable description */
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun /**
346*4882a593Smuzhiyun  * ibmvfc_get_ls_explain - Return the FC Explain description text
347*4882a593Smuzhiyun  * @status:	FC Explain status
348*4882a593Smuzhiyun  *
349*4882a593Smuzhiyun  * Returns:
350*4882a593Smuzhiyun  *	error string
351*4882a593Smuzhiyun  **/
ibmvfc_get_ls_explain(u16 status)352*4882a593Smuzhiyun static const char *ibmvfc_get_ls_explain(u16 status)
353*4882a593Smuzhiyun {
354*4882a593Smuzhiyun 	int i;
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
357*4882a593Smuzhiyun 		if (ls_explain[i].fc_explain == status)
358*4882a593Smuzhiyun 			return ls_explain[i].name;
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	return unknown_fc_explain;
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun /**
364*4882a593Smuzhiyun  * ibmvfc_get_gs_explain - Return the FC Explain description text
365*4882a593Smuzhiyun  * @status:	FC Explain status
366*4882a593Smuzhiyun  *
367*4882a593Smuzhiyun  * Returns:
368*4882a593Smuzhiyun  *	error string
369*4882a593Smuzhiyun  **/
ibmvfc_get_gs_explain(u16 status)370*4882a593Smuzhiyun static const char *ibmvfc_get_gs_explain(u16 status)
371*4882a593Smuzhiyun {
372*4882a593Smuzhiyun 	int i;
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
375*4882a593Smuzhiyun 		if (gs_explain[i].fc_explain == status)
376*4882a593Smuzhiyun 			return gs_explain[i].name;
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	return unknown_fc_explain;
379*4882a593Smuzhiyun }
380*4882a593Smuzhiyun 
/* FC reject/busy type codes → description, looked up by ibmvfc_get_fc_type() */
static const struct {
	enum ibmvfc_fc_type fc_type;	/* type code reported by the adapter */
	char *name;			/* human-readable description */
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

/* Fallback description when a lookup in fc_type finds no match */
static const char *unknown_fc_type = "unknown fc type";
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun /**
396*4882a593Smuzhiyun  * ibmvfc_get_fc_type - Return the FC Type description text
397*4882a593Smuzhiyun  * @status:	FC Type error status
398*4882a593Smuzhiyun  *
399*4882a593Smuzhiyun  * Returns:
400*4882a593Smuzhiyun  *	error string
401*4882a593Smuzhiyun  **/
ibmvfc_get_fc_type(u16 status)402*4882a593Smuzhiyun static const char *ibmvfc_get_fc_type(u16 status)
403*4882a593Smuzhiyun {
404*4882a593Smuzhiyun 	int i;
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
407*4882a593Smuzhiyun 		if (fc_type[i].fc_type == status)
408*4882a593Smuzhiyun 			return fc_type[i].name;
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	return unknown_fc_type;
411*4882a593Smuzhiyun }
412*4882a593Smuzhiyun 
/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt:		ibmvfc target struct
 * @action:		action to perform
 *
 * Implements the target-removal state machine: only the specific forward
 * transitions enumerated below are allowed; any other request leaves
 * tgt->action untouched and returns -EINVAL. DELETED_RPORT is terminal.
 * NOTE(review): callers presumably hold the host lock when changing
 * tgt->action — confirm at call sites.
 *
 * Returns:
 *	0 if action changed / non-zero if not changed
 **/
static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
				  enum ibmvfc_target_action action)
{
	int rc = -EINVAL;

	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
		/* logout issued: may proceed to waiting for it or straight to delete */
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
		/* waiting on logout: may delete, or delete-and-logout */
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
		/* only a fresh logout request is allowed from here */
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_RPORT:
		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DELETED_RPORT:
		/* terminal state: no further transitions */
		break;
	default:
		/* states outside the removal path accept any action */
		tgt->action = action;
		rc = 0;
		break;
	}

	/* any removal-path action (requested, even if rejected above) means
	 * this target must not be re-added as an rport */
	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
		tgt->add_rport = 0;

	return rc;
}
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun /**
474*4882a593Smuzhiyun  * ibmvfc_set_host_state - Set the state for the host
475*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
476*4882a593Smuzhiyun  * @state:		state to set host to
477*4882a593Smuzhiyun  *
478*4882a593Smuzhiyun  * Returns:
479*4882a593Smuzhiyun  *	0 if state changed / non-zero if not changed
480*4882a593Smuzhiyun  **/
ibmvfc_set_host_state(struct ibmvfc_host * vhost,enum ibmvfc_host_state state)481*4882a593Smuzhiyun static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
482*4882a593Smuzhiyun 				  enum ibmvfc_host_state state)
483*4882a593Smuzhiyun {
484*4882a593Smuzhiyun 	int rc = 0;
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun 	switch (vhost->state) {
487*4882a593Smuzhiyun 	case IBMVFC_HOST_OFFLINE:
488*4882a593Smuzhiyun 		rc = -EINVAL;
489*4882a593Smuzhiyun 		break;
490*4882a593Smuzhiyun 	default:
491*4882a593Smuzhiyun 		vhost->state = state;
492*4882a593Smuzhiyun 		break;
493*4882a593Smuzhiyun 	}
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	return rc;
496*4882a593Smuzhiyun }
497*4882a593Smuzhiyun 
/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:		ibmvfc host struct
 * @action:		action to perform
 *
 * Host-level state machine for the init/discovery work thread. Most
 * "wait" actions are only accepted from their matching in-progress
 * action; RESET/REENABLE are always accepted and, once set, can only be
 * displaced by each other (see the default arm). Disallowed transitions
 * are silently ignored.
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		/* only valid after discovery query completed */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		/* (re)start discovery only from idle / post-init states */
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
	case IBMVFC_HOST_ACTION_RESET:
		/* adapter-level recovery is always accepted */
		vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	default:
		/* never overwrite a pending RESET/REENABLE with normal work */
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	}
}
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun /**
559*4882a593Smuzhiyun  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
560*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
561*4882a593Smuzhiyun  *
562*4882a593Smuzhiyun  * Return value:
563*4882a593Smuzhiyun  *	nothing
564*4882a593Smuzhiyun  **/
ibmvfc_reinit_host(struct ibmvfc_host * vhost)565*4882a593Smuzhiyun static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
566*4882a593Smuzhiyun {
567*4882a593Smuzhiyun 	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
568*4882a593Smuzhiyun 	    vhost->state == IBMVFC_ACTIVE) {
569*4882a593Smuzhiyun 		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
570*4882a593Smuzhiyun 			scsi_block_requests(vhost->host);
571*4882a593Smuzhiyun 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
572*4882a593Smuzhiyun 		}
573*4882a593Smuzhiyun 	} else
574*4882a593Smuzhiyun 		vhost->reinit = 1;
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	wake_up(&vhost->work_wait_q);
577*4882a593Smuzhiyun }
578*4882a593Smuzhiyun 
/**
 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
 * @tgt:		ibmvfc target struct
 *
 * If the target can be moved to the LOGOUT_RPORT state, its job step is
 * set to the implicit-logout-and-delete handler; the host work thread is
 * then woken to carry out the removal.
 **/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
	wake_up(&tgt->vhost->work_wait_q);
}
591*4882a593Smuzhiyun 
/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 * Blocks new SCSI requests, schedules every known target for logout and
 * removal, enters @state with the TGT_DEL action, records a link-down
 * async event for logging, and wakes the host work thread.
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	/* Stop the midlayer from issuing new commands while the link is down */
	scsi_block_requests(vhost->host);
	/* Each target is queued for logout/removal by the work thread */
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}
613*4882a593Smuzhiyun 
/**
 * ibmvfc_init_host - Start host initialization
 * @vhost:		ibmvfc host struct
 *
 * Counts initialization retries when one is already in flight and takes
 * the adapter offline once the limit is exceeded. Otherwise resets the
 * async event queue, prepares targets for rediscovery (or just re-login
 * after a partition migration), and hands NPIV login to the work thread.
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		/* An init was already pending - count this as a retry */
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		/* Start from a clean async event queue */
		memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		/*
		 * After a client migration existing targets only need a new
		 * login; otherwise they are deleted and rediscovered.
		 */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (vhost->client_migrated)
				tgt->need_login = 1;
			else
				ibmvfc_del_tgt(tgt);
		}

		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun /**
653*4882a593Smuzhiyun  * ibmvfc_send_crq - Send a CRQ
654*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
655*4882a593Smuzhiyun  * @word1:	the first 64 bits of the data
656*4882a593Smuzhiyun  * @word2:	the second 64 bits of the data
657*4882a593Smuzhiyun  *
658*4882a593Smuzhiyun  * Return value:
659*4882a593Smuzhiyun  *	0 on success / other on failure
660*4882a593Smuzhiyun  **/
ibmvfc_send_crq(struct ibmvfc_host * vhost,u64 word1,u64 word2)661*4882a593Smuzhiyun static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
662*4882a593Smuzhiyun {
663*4882a593Smuzhiyun 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
664*4882a593Smuzhiyun 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
665*4882a593Smuzhiyun }
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun /**
668*4882a593Smuzhiyun  * ibmvfc_send_crq_init - Send a CRQ init message
669*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
670*4882a593Smuzhiyun  *
671*4882a593Smuzhiyun  * Return value:
672*4882a593Smuzhiyun  *	0 on success / other on failure
673*4882a593Smuzhiyun  **/
ibmvfc_send_crq_init(struct ibmvfc_host * vhost)674*4882a593Smuzhiyun static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
675*4882a593Smuzhiyun {
676*4882a593Smuzhiyun 	ibmvfc_dbg(vhost, "Sending CRQ init\n");
677*4882a593Smuzhiyun 	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
678*4882a593Smuzhiyun }
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun /**
681*4882a593Smuzhiyun  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
682*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
683*4882a593Smuzhiyun  *
684*4882a593Smuzhiyun  * Return value:
685*4882a593Smuzhiyun  *	0 on success / other on failure
686*4882a593Smuzhiyun  **/
ibmvfc_send_crq_init_complete(struct ibmvfc_host * vhost)687*4882a593Smuzhiyun static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
688*4882a593Smuzhiyun {
689*4882a593Smuzhiyun 	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
690*4882a593Smuzhiyun 	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
691*4882a593Smuzhiyun }
692*4882a593Smuzhiyun 
/**
 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
 * @vhost:	ibmvfc host struct
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 **/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	ibmvfc_dbg(vhost, "Releasing CRQ\n");
	/* No interrupts or deferred work may run once teardown starts */
	free_irq(vdev->irq, vhost);
	tasklet_kill(&vhost->tasklet);
	/*
	 * Retry H_FREE_CRQ while the hypervisor reports busy, sleeping
	 * 100ms between attempts (no sleep before the first call).
	 */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	/* Only after the CRQ is freed is it safe to unmap and free the page */
	dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun /**
722*4882a593Smuzhiyun  * ibmvfc_reenable_crq_queue - reenables the CRQ
723*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
724*4882a593Smuzhiyun  *
725*4882a593Smuzhiyun  * Return value:
726*4882a593Smuzhiyun  *	0 on success / other on failure
727*4882a593Smuzhiyun  **/
ibmvfc_reenable_crq_queue(struct ibmvfc_host * vhost)728*4882a593Smuzhiyun static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
729*4882a593Smuzhiyun {
730*4882a593Smuzhiyun 	int rc = 0;
731*4882a593Smuzhiyun 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun 	/* Re-enable the CRQ */
734*4882a593Smuzhiyun 	do {
735*4882a593Smuzhiyun 		if (rc)
736*4882a593Smuzhiyun 			msleep(100);
737*4882a593Smuzhiyun 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
738*4882a593Smuzhiyun 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	if (rc)
741*4882a593Smuzhiyun 		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	return rc;
744*4882a593Smuzhiyun }
745*4882a593Smuzhiyun 
/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost:	ibmvfc host struct
 *
 * Frees the CRQ with the hypervisor (retrying while busy), clears the
 * queue page under the host lock, and re-registers the queue.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);	/* back off while the hypervisor is busy */
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	spin_lock_irqsave(vhost->host->host_lock, flags);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	return rc;
}
788*4882a593Smuzhiyun 
789*4882a593Smuzhiyun /**
790*4882a593Smuzhiyun  * ibmvfc_valid_event - Determines if event is valid.
791*4882a593Smuzhiyun  * @pool:	event_pool that contains the event
792*4882a593Smuzhiyun  * @evt:	ibmvfc event to be checked for validity
793*4882a593Smuzhiyun  *
794*4882a593Smuzhiyun  * Return value:
795*4882a593Smuzhiyun  *	1 if event is valid / 0 if event is not valid
796*4882a593Smuzhiyun  **/
ibmvfc_valid_event(struct ibmvfc_event_pool * pool,struct ibmvfc_event * evt)797*4882a593Smuzhiyun static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
798*4882a593Smuzhiyun 			      struct ibmvfc_event *evt)
799*4882a593Smuzhiyun {
800*4882a593Smuzhiyun 	int index = evt - pool->events;
801*4882a593Smuzhiyun 	if (index < 0 || index >= pool->size)	/* outside of bounds */
802*4882a593Smuzhiyun 		return 0;
803*4882a593Smuzhiyun 	if (evt != pool->events + index)	/* unaligned */
804*4882a593Smuzhiyun 		return 0;
805*4882a593Smuzhiyun 	return 1;
806*4882a593Smuzhiyun }
807*4882a593Smuzhiyun 
/**
 * ibmvfc_free_event - Free the specified event
 * @evt:	ibmvfc_event to be freed
 *
 * Returns the event to the host's free list.
 **/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_event_pool *pool = &vhost->pool;

	/* Catch events that don't belong to this pool */
	BUG_ON(!ibmvfc_valid_event(pool, evt));
	/* evt->free must transition 0 -> 1 here; anything else is a double free */
	BUG_ON(atomic_inc_return(&evt->free) != 1);
	list_add_tail(&evt->queue, &vhost->free);
}
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun /**
824*4882a593Smuzhiyun  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
825*4882a593Smuzhiyun  * @evt:	ibmvfc event struct
826*4882a593Smuzhiyun  *
827*4882a593Smuzhiyun  * This function does not setup any error status, that must be done
828*4882a593Smuzhiyun  * before this function gets called.
829*4882a593Smuzhiyun  **/
ibmvfc_scsi_eh_done(struct ibmvfc_event * evt)830*4882a593Smuzhiyun static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
831*4882a593Smuzhiyun {
832*4882a593Smuzhiyun 	struct scsi_cmnd *cmnd = evt->cmnd;
833*4882a593Smuzhiyun 
834*4882a593Smuzhiyun 	if (cmnd) {
835*4882a593Smuzhiyun 		scsi_dma_unmap(cmnd);
836*4882a593Smuzhiyun 		cmnd->scsi_done(cmnd);
837*4882a593Smuzhiyun 	}
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun 	if (evt->eh_comp)
840*4882a593Smuzhiyun 		complete(evt->eh_comp);
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun 	ibmvfc_free_event(evt);
843*4882a593Smuzhiyun }
844*4882a593Smuzhiyun 
/**
 * ibmvfc_fail_request - Fail request with specified error code
 * @evt:		ibmvfc event struct
 * @error_code:	error code to fail request with
 *
 * SCSI commands get @error_code placed in the midlayer result and are
 * completed through the EH done path; MAD requests have their response
 * status marked as driver failed.
 *
 * Return value:
 *	none
 **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
	if (evt->cmnd) {
		/* host_byte of the SCSI result carries the error code */
		evt->cmnd->result = (error_code << 16);
		evt->done = ibmvfc_scsi_eh_done;
	} else
		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);

	/* Unlink and stop the timeout timer before running the completion */
	list_del(&evt->queue);
	del_timer(&evt->timer);
	ibmvfc_trc_end(evt);
	evt->done(evt);
}
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun /**
868*4882a593Smuzhiyun  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
869*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
870*4882a593Smuzhiyun  * @error_code:	error code to fail requests with
871*4882a593Smuzhiyun  *
872*4882a593Smuzhiyun  * Return value:
873*4882a593Smuzhiyun  *	none
874*4882a593Smuzhiyun  **/
ibmvfc_purge_requests(struct ibmvfc_host * vhost,int error_code)875*4882a593Smuzhiyun static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
876*4882a593Smuzhiyun {
877*4882a593Smuzhiyun 	struct ibmvfc_event *evt, *pos;
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun 	ibmvfc_dbg(vhost, "Purging all requests\n");
880*4882a593Smuzhiyun 	list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
881*4882a593Smuzhiyun 		ibmvfc_fail_request(evt, error_code);
882*4882a593Smuzhiyun }
883*4882a593Smuzhiyun 
/**
 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
 * @vhost:	struct ibmvfc host to reset
 *
 * Fails all outstanding requests with DID_ERROR, takes the link down,
 * and schedules a RESET action for the work thread.
 **/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
	ibmvfc_purge_requests(vhost, DID_ERROR);
	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun /**
896*4882a593Smuzhiyun  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
897*4882a593Smuzhiyun  * @vhost:	struct ibmvfc host to reset
898*4882a593Smuzhiyun  **/
__ibmvfc_reset_host(struct ibmvfc_host * vhost)899*4882a593Smuzhiyun static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
900*4882a593Smuzhiyun {
901*4882a593Smuzhiyun 	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
902*4882a593Smuzhiyun 	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
903*4882a593Smuzhiyun 		scsi_block_requests(vhost->host);
904*4882a593Smuzhiyun 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
905*4882a593Smuzhiyun 		vhost->job_step = ibmvfc_npiv_logout;
906*4882a593Smuzhiyun 		wake_up(&vhost->work_wait_q);
907*4882a593Smuzhiyun 	} else
908*4882a593Smuzhiyun 		ibmvfc_hard_reset_host(vhost);
909*4882a593Smuzhiyun }
910*4882a593Smuzhiyun 
/**
 * ibmvfc_reset_host - Reset the connection to the server
 * @vhost:	ibmvfc host struct
 *
 * Locked wrapper around __ibmvfc_reset_host().
 **/
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	__ibmvfc_reset_host(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
923*4882a593Smuzhiyun 
/**
 * ibmvfc_retry_host_init - Retry host initialization if allowed
 * @vhost:	ibmvfc host struct
 *
 * Only acts when an init is already pending (INIT_WAIT). Escalates as
 * retries accumulate: retry the INIT action, then attempt a full host
 * reset on the final permitted retry, then take the adapter offline.
 *
 * Returns: 1 if init will be retried / 0 if not
 *
 **/
static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
	int retry = 0;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		vhost->delay_init = 1;
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
			/* Last allowed retry: try a full reset before giving up */
			__ibmvfc_reset_host(vhost);
		else {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
			retry = 1;
		}
	}

	wake_up(&vhost->work_wait_q);
	return retry;
}
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun /**
954*4882a593Smuzhiyun  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
955*4882a593Smuzhiyun  * @starget:	scsi target struct
956*4882a593Smuzhiyun  *
957*4882a593Smuzhiyun  * Return value:
958*4882a593Smuzhiyun  *	ibmvfc_target struct / NULL if not found
959*4882a593Smuzhiyun  **/
__ibmvfc_get_target(struct scsi_target * starget)960*4882a593Smuzhiyun static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
961*4882a593Smuzhiyun {
962*4882a593Smuzhiyun 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
963*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
964*4882a593Smuzhiyun 	struct ibmvfc_target *tgt;
965*4882a593Smuzhiyun 
966*4882a593Smuzhiyun 	list_for_each_entry(tgt, &vhost->targets, queue)
967*4882a593Smuzhiyun 		if (tgt->target_id == starget->id) {
968*4882a593Smuzhiyun 			kref_get(&tgt->kref);
969*4882a593Smuzhiyun 			return tgt;
970*4882a593Smuzhiyun 		}
971*4882a593Smuzhiyun 	return NULL;
972*4882a593Smuzhiyun }
973*4882a593Smuzhiyun 
/**
 * ibmvfc_get_target - Find the specified scsi_target
 * @starget:	scsi target struct
 *
 * Locked wrapper around __ibmvfc_get_target(). The returned target,
 * if any, holds a reference the caller must drop with kref_put().
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_target *tgt;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	tgt = __ibmvfc_get_target(starget);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return tgt;
}
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun /**
994*4882a593Smuzhiyun  * ibmvfc_get_host_speed - Get host port speed
995*4882a593Smuzhiyun  * @shost:		scsi host struct
996*4882a593Smuzhiyun  *
997*4882a593Smuzhiyun  * Return value:
998*4882a593Smuzhiyun  * 	none
999*4882a593Smuzhiyun  **/
ibmvfc_get_host_speed(struct Scsi_Host * shost)1000*4882a593Smuzhiyun static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1001*4882a593Smuzhiyun {
1002*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
1003*4882a593Smuzhiyun 	unsigned long flags;
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun 	spin_lock_irqsave(shost->host_lock, flags);
1006*4882a593Smuzhiyun 	if (vhost->state == IBMVFC_ACTIVE) {
1007*4882a593Smuzhiyun 		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1008*4882a593Smuzhiyun 		case 1:
1009*4882a593Smuzhiyun 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1010*4882a593Smuzhiyun 			break;
1011*4882a593Smuzhiyun 		case 2:
1012*4882a593Smuzhiyun 			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1013*4882a593Smuzhiyun 			break;
1014*4882a593Smuzhiyun 		case 4:
1015*4882a593Smuzhiyun 			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1016*4882a593Smuzhiyun 			break;
1017*4882a593Smuzhiyun 		case 8:
1018*4882a593Smuzhiyun 			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1019*4882a593Smuzhiyun 			break;
1020*4882a593Smuzhiyun 		case 10:
1021*4882a593Smuzhiyun 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1022*4882a593Smuzhiyun 			break;
1023*4882a593Smuzhiyun 		case 16:
1024*4882a593Smuzhiyun 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1025*4882a593Smuzhiyun 			break;
1026*4882a593Smuzhiyun 		default:
1027*4882a593Smuzhiyun 			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1028*4882a593Smuzhiyun 				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1029*4882a593Smuzhiyun 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1030*4882a593Smuzhiyun 			break;
1031*4882a593Smuzhiyun 		}
1032*4882a593Smuzhiyun 	} else
1033*4882a593Smuzhiyun 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1034*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
1035*4882a593Smuzhiyun }
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun /**
1038*4882a593Smuzhiyun  * ibmvfc_get_host_port_state - Get host port state
1039*4882a593Smuzhiyun  * @shost:		scsi host struct
1040*4882a593Smuzhiyun  *
1041*4882a593Smuzhiyun  * Return value:
1042*4882a593Smuzhiyun  * 	none
1043*4882a593Smuzhiyun  **/
ibmvfc_get_host_port_state(struct Scsi_Host * shost)1044*4882a593Smuzhiyun static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1045*4882a593Smuzhiyun {
1046*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
1047*4882a593Smuzhiyun 	unsigned long flags;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	spin_lock_irqsave(shost->host_lock, flags);
1050*4882a593Smuzhiyun 	switch (vhost->state) {
1051*4882a593Smuzhiyun 	case IBMVFC_INITIALIZING:
1052*4882a593Smuzhiyun 	case IBMVFC_ACTIVE:
1053*4882a593Smuzhiyun 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1054*4882a593Smuzhiyun 		break;
1055*4882a593Smuzhiyun 	case IBMVFC_LINK_DOWN:
1056*4882a593Smuzhiyun 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1057*4882a593Smuzhiyun 		break;
1058*4882a593Smuzhiyun 	case IBMVFC_LINK_DEAD:
1059*4882a593Smuzhiyun 	case IBMVFC_HOST_OFFLINE:
1060*4882a593Smuzhiyun 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1061*4882a593Smuzhiyun 		break;
1062*4882a593Smuzhiyun 	case IBMVFC_HALTED:
1063*4882a593Smuzhiyun 		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1064*4882a593Smuzhiyun 		break;
1065*4882a593Smuzhiyun 	case IBMVFC_NO_CRQ:
1066*4882a593Smuzhiyun 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1067*4882a593Smuzhiyun 		break;
1068*4882a593Smuzhiyun 	default:
1069*4882a593Smuzhiyun 		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1070*4882a593Smuzhiyun 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1071*4882a593Smuzhiyun 		break;
1072*4882a593Smuzhiyun 	}
1073*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun /**
1077*4882a593Smuzhiyun  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1078*4882a593Smuzhiyun  * @rport:		rport struct
1079*4882a593Smuzhiyun  * @timeout:	timeout value
1080*4882a593Smuzhiyun  *
1081*4882a593Smuzhiyun  * Return value:
1082*4882a593Smuzhiyun  * 	none
1083*4882a593Smuzhiyun  **/
ibmvfc_set_rport_dev_loss_tmo(struct fc_rport * rport,u32 timeout)1084*4882a593Smuzhiyun static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun 	if (timeout)
1087*4882a593Smuzhiyun 		rport->dev_loss_tmo = timeout;
1088*4882a593Smuzhiyun 	else
1089*4882a593Smuzhiyun 		rport->dev_loss_tmo = 1;
1090*4882a593Smuzhiyun }
1091*4882a593Smuzhiyun 
/**
 * ibmvfc_release_tgt - Free memory allocated for a target
 * @kref:		kref struct
 *
 * kref release callback: invoked when the last reference to the
 * embedded kref is dropped.
 **/
static void ibmvfc_release_tgt(struct kref *kref)
{
	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
	kfree(tgt);
}
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun /**
1104*4882a593Smuzhiyun  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1105*4882a593Smuzhiyun  * @starget:	scsi target struct
1106*4882a593Smuzhiyun  *
1107*4882a593Smuzhiyun  * Return value:
1108*4882a593Smuzhiyun  * 	none
1109*4882a593Smuzhiyun  **/
ibmvfc_get_starget_node_name(struct scsi_target * starget)1110*4882a593Smuzhiyun static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1111*4882a593Smuzhiyun {
1112*4882a593Smuzhiyun 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1113*4882a593Smuzhiyun 	fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1114*4882a593Smuzhiyun 	if (tgt)
1115*4882a593Smuzhiyun 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1116*4882a593Smuzhiyun }
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun /**
1119*4882a593Smuzhiyun  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1120*4882a593Smuzhiyun  * @starget:	scsi target struct
1121*4882a593Smuzhiyun  *
1122*4882a593Smuzhiyun  * Return value:
1123*4882a593Smuzhiyun  * 	none
1124*4882a593Smuzhiyun  **/
ibmvfc_get_starget_port_name(struct scsi_target * starget)1125*4882a593Smuzhiyun static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1128*4882a593Smuzhiyun 	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1129*4882a593Smuzhiyun 	if (tgt)
1130*4882a593Smuzhiyun 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun /**
1134*4882a593Smuzhiyun  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1135*4882a593Smuzhiyun  * @starget:	scsi target struct
1136*4882a593Smuzhiyun  *
1137*4882a593Smuzhiyun  * Return value:
1138*4882a593Smuzhiyun  * 	none
1139*4882a593Smuzhiyun  **/
ibmvfc_get_starget_port_id(struct scsi_target * starget)1140*4882a593Smuzhiyun static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1141*4882a593Smuzhiyun {
1142*4882a593Smuzhiyun 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1143*4882a593Smuzhiyun 	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1144*4882a593Smuzhiyun 	if (tgt)
1145*4882a593Smuzhiyun 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1146*4882a593Smuzhiyun }
1147*4882a593Smuzhiyun 
/**
 * ibmvfc_wait_while_resetting - Wait while the host resets
 * @vhost:		ibmvfc host struct
 *
 * Sleeps until the host reaches a settled state (active, offline, or
 * link dead) with no action pending, or until init_timeout seconds
 * elapse.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
{
	long timeout = wait_event_timeout(vhost->init_wait_q,
					  ((vhost->state == IBMVFC_ACTIVE ||
					    vhost->state == IBMVFC_HOST_OFFLINE ||
					    vhost->state == IBMVFC_LINK_DEAD) &&
					   vhost->action == IBMVFC_HOST_ACTION_NONE),
					  (init_timeout * HZ));

	/* wait_event_timeout() returns 0 only when the timeout expired */
	return timeout ? 0 : -EIO;
}
1166*4882a593Smuzhiyun 
/**
 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
 * @shost:		scsi host struct
 *
 * FC transport "issue_lip" callback: resets the connection and waits
 * for the host to settle again.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);

	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
	ibmvfc_reset_host(vhost);
	return ibmvfc_wait_while_resetting(vhost);
}
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun /**
1184*4882a593Smuzhiyun  * ibmvfc_gather_partition_info - Gather info about the LPAR
1185*4882a593Smuzhiyun  *
1186*4882a593Smuzhiyun  * Return value:
1187*4882a593Smuzhiyun  *	none
1188*4882a593Smuzhiyun  **/
ibmvfc_gather_partition_info(struct ibmvfc_host * vhost)1189*4882a593Smuzhiyun static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	struct device_node *rootdn;
1192*4882a593Smuzhiyun 	const char *name;
1193*4882a593Smuzhiyun 	const unsigned int *num;
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	rootdn = of_find_node_by_path("/");
1196*4882a593Smuzhiyun 	if (!rootdn)
1197*4882a593Smuzhiyun 		return;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	name = of_get_property(rootdn, "ibm,partition-name", NULL);
1200*4882a593Smuzhiyun 	if (name)
1201*4882a593Smuzhiyun 		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1202*4882a593Smuzhiyun 	num = of_get_property(rootdn, "ibm,partition-no", NULL);
1203*4882a593Smuzhiyun 	if (num)
1204*4882a593Smuzhiyun 		vhost->partition_number = *num;
1205*4882a593Smuzhiyun 	of_node_put(rootdn);
1206*4882a593Smuzhiyun }
1207*4882a593Smuzhiyun 
/**
 * ibmvfc_set_login_info - Setup info for NPIV login
 * @vhost:	ibmvfc host struct
 *
 * Fills in the NPIV login buffer sent to the server: OS type, DMA and
 * payload limits, partition identity, async queue location, and the
 * various name strings.
 *
 * Return value:
 *	none
 **/
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
	struct device_node *of_node = vhost->dev->of_node;
	const char *location;

	memset(login_info, 0, sizeof(*login_info));

	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);	/* sectors -> bytes */
	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
	login_info->partition_num = cpu_to_be32(vhost->partition_number);
	login_info->vfc_frame_version = cpu_to_be32(1);
	login_info->fcp_version = cpu_to_be16(3);
	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
	if (vhost->client_migrated)
		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);

	/* Server needs room for our requests plus the driver's internal ones */
	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE);
	/* Tell the server where the async event queue lives */
	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
	login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
	/*
	 * NOTE(review): strncpy() can leave these fixed-size name fields
	 * without a NUL when the source is IBMVFC_MAX_NAME bytes or longer -
	 * presumably the server treats them as fixed-width/zero-padded
	 * fields (the buffer was memset above); confirm before changing.
	 */
	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
	strncpy(login_info->device_name,
		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);

	location = of_get_property(of_node, "ibm,loc-code", NULL);
	location = location ? location : dev_name(vhost->dev);
	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
}
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun /**
1248*4882a593Smuzhiyun  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
1249*4882a593Smuzhiyun  * @vhost:	ibmvfc host who owns the event pool
1250*4882a593Smuzhiyun  *
1251*4882a593Smuzhiyun  * Returns zero on success.
1252*4882a593Smuzhiyun  **/
ibmvfc_init_event_pool(struct ibmvfc_host * vhost)1253*4882a593Smuzhiyun static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
1254*4882a593Smuzhiyun {
1255*4882a593Smuzhiyun 	int i;
1256*4882a593Smuzhiyun 	struct ibmvfc_event_pool *pool = &vhost->pool;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	ENTER;
1259*4882a593Smuzhiyun 	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1260*4882a593Smuzhiyun 	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
1261*4882a593Smuzhiyun 	if (!pool->events)
1262*4882a593Smuzhiyun 		return -ENOMEM;
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	pool->iu_storage = dma_alloc_coherent(vhost->dev,
1265*4882a593Smuzhiyun 					      pool->size * sizeof(*pool->iu_storage),
1266*4882a593Smuzhiyun 					      &pool->iu_token, 0);
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	if (!pool->iu_storage) {
1269*4882a593Smuzhiyun 		kfree(pool->events);
1270*4882a593Smuzhiyun 		return -ENOMEM;
1271*4882a593Smuzhiyun 	}
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	for (i = 0; i < pool->size; ++i) {
1274*4882a593Smuzhiyun 		struct ibmvfc_event *evt = &pool->events[i];
1275*4882a593Smuzhiyun 		atomic_set(&evt->free, 1);
1276*4882a593Smuzhiyun 		evt->crq.valid = 0x80;
1277*4882a593Smuzhiyun 		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
1278*4882a593Smuzhiyun 		evt->xfer_iu = pool->iu_storage + i;
1279*4882a593Smuzhiyun 		evt->vhost = vhost;
1280*4882a593Smuzhiyun 		evt->ext_list = NULL;
1281*4882a593Smuzhiyun 		list_add_tail(&evt->queue, &vhost->free);
1282*4882a593Smuzhiyun 	}
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	LEAVE;
1285*4882a593Smuzhiyun 	return 0;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun /**
1289*4882a593Smuzhiyun  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
1290*4882a593Smuzhiyun  * @vhost:	ibmvfc host who owns the event pool
1291*4882a593Smuzhiyun  *
1292*4882a593Smuzhiyun  **/
ibmvfc_free_event_pool(struct ibmvfc_host * vhost)1293*4882a593Smuzhiyun static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
1294*4882a593Smuzhiyun {
1295*4882a593Smuzhiyun 	int i;
1296*4882a593Smuzhiyun 	struct ibmvfc_event_pool *pool = &vhost->pool;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	ENTER;
1299*4882a593Smuzhiyun 	for (i = 0; i < pool->size; ++i) {
1300*4882a593Smuzhiyun 		list_del(&pool->events[i].queue);
1301*4882a593Smuzhiyun 		BUG_ON(atomic_read(&pool->events[i].free) != 1);
1302*4882a593Smuzhiyun 		if (pool->events[i].ext_list)
1303*4882a593Smuzhiyun 			dma_pool_free(vhost->sg_pool,
1304*4882a593Smuzhiyun 				      pool->events[i].ext_list,
1305*4882a593Smuzhiyun 				      pool->events[i].ext_list_token);
1306*4882a593Smuzhiyun 	}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	kfree(pool->events);
1309*4882a593Smuzhiyun 	dma_free_coherent(vhost->dev,
1310*4882a593Smuzhiyun 			  pool->size * sizeof(*pool->iu_storage),
1311*4882a593Smuzhiyun 			  pool->iu_storage, pool->iu_token);
1312*4882a593Smuzhiyun 	LEAVE;
1313*4882a593Smuzhiyun }
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun /**
1316*4882a593Smuzhiyun  * ibmvfc_get_event - Gets the next free event in pool
1317*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
1318*4882a593Smuzhiyun  *
1319*4882a593Smuzhiyun  * Returns a free event from the pool.
1320*4882a593Smuzhiyun  **/
ibmvfc_get_event(struct ibmvfc_host * vhost)1321*4882a593Smuzhiyun static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
1322*4882a593Smuzhiyun {
1323*4882a593Smuzhiyun 	struct ibmvfc_event *evt;
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	BUG_ON(list_empty(&vhost->free));
1326*4882a593Smuzhiyun 	evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1327*4882a593Smuzhiyun 	atomic_set(&evt->free, 0);
1328*4882a593Smuzhiyun 	list_del(&evt->queue);
1329*4882a593Smuzhiyun 	return evt;
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun /**
1333*4882a593Smuzhiyun  * ibmvfc_init_event - Initialize fields in an event struct that are always
1334*4882a593Smuzhiyun  *				required.
1335*4882a593Smuzhiyun  * @evt:	The event
1336*4882a593Smuzhiyun  * @done:	Routine to call when the event is responded to
1337*4882a593Smuzhiyun  * @format:	SRP or MAD format
1338*4882a593Smuzhiyun  **/
static void ibmvfc_init_event(struct ibmvfc_event *evt,
			      void (*done) (struct ibmvfc_event *), u8 format)
{
	/* Record how to complete the event, then clear per-use state */
	evt->crq.format = format;
	evt->done = done;
	evt->cmnd = NULL;
	evt->sync_iu = NULL;
	evt->eh_comp = NULL;
}
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun /**
1350*4882a593Smuzhiyun  * ibmvfc_map_sg_list - Initialize scatterlist
1351*4882a593Smuzhiyun  * @scmd:	scsi command struct
1352*4882a593Smuzhiyun  * @nseg:	number of scatterlist segments
1353*4882a593Smuzhiyun  * @md:	memory descriptor list to initialize
1354*4882a593Smuzhiyun  **/
ibmvfc_map_sg_list(struct scsi_cmnd * scmd,int nseg,struct srp_direct_buf * md)1355*4882a593Smuzhiyun static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1356*4882a593Smuzhiyun 			       struct srp_direct_buf *md)
1357*4882a593Smuzhiyun {
1358*4882a593Smuzhiyun 	int i;
1359*4882a593Smuzhiyun 	struct scatterlist *sg;
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	scsi_for_each_sg(scmd, sg, nseg, i) {
1362*4882a593Smuzhiyun 		md[i].va = cpu_to_be64(sg_dma_address(sg));
1363*4882a593Smuzhiyun 		md[i].len = cpu_to_be32(sg_dma_len(sg));
1364*4882a593Smuzhiyun 		md[i].key = 0;
1365*4882a593Smuzhiyun 	}
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun /**
1369*4882a593Smuzhiyun  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1370*4882a593Smuzhiyun  * @scmd:		struct scsi_cmnd with the scatterlist
1371*4882a593Smuzhiyun  * @evt:		ibmvfc event struct
1372*4882a593Smuzhiyun  * @vfc_cmd:	vfc_cmd that contains the memory descriptor
1373*4882a593Smuzhiyun  * @dev:		device for which to map dma memory
1374*4882a593Smuzhiyun  *
1375*4882a593Smuzhiyun  * Returns:
1376*4882a593Smuzhiyun  *	0 on success / non-zero on failure
1377*4882a593Smuzhiyun  **/
ibmvfc_map_sg_data(struct scsi_cmnd * scmd,struct ibmvfc_event * evt,struct ibmvfc_cmd * vfc_cmd,struct device * dev)1378*4882a593Smuzhiyun static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1379*4882a593Smuzhiyun 			      struct ibmvfc_event *evt,
1380*4882a593Smuzhiyun 			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1381*4882a593Smuzhiyun {
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	int sg_mapped;
1384*4882a593Smuzhiyun 	struct srp_direct_buf *data = &vfc_cmd->ioba;
1385*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	if (cls3_error)
1388*4882a593Smuzhiyun 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	sg_mapped = scsi_dma_map(scmd);
1391*4882a593Smuzhiyun 	if (!sg_mapped) {
1392*4882a593Smuzhiyun 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
1393*4882a593Smuzhiyun 		return 0;
1394*4882a593Smuzhiyun 	} else if (unlikely(sg_mapped < 0)) {
1395*4882a593Smuzhiyun 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1396*4882a593Smuzhiyun 			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1397*4882a593Smuzhiyun 		return sg_mapped;
1398*4882a593Smuzhiyun 	}
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1401*4882a593Smuzhiyun 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1402*4882a593Smuzhiyun 		vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
1403*4882a593Smuzhiyun 	} else {
1404*4882a593Smuzhiyun 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1405*4882a593Smuzhiyun 		vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
1406*4882a593Smuzhiyun 	}
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	if (sg_mapped == 1) {
1409*4882a593Smuzhiyun 		ibmvfc_map_sg_list(scmd, sg_mapped, data);
1410*4882a593Smuzhiyun 		return 0;
1411*4882a593Smuzhiyun 	}
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun 	if (!evt->ext_list) {
1416*4882a593Smuzhiyun 		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1417*4882a593Smuzhiyun 					       &evt->ext_list_token);
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 		if (!evt->ext_list) {
1420*4882a593Smuzhiyun 			scsi_dma_unmap(scmd);
1421*4882a593Smuzhiyun 			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1422*4882a593Smuzhiyun 				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1423*4882a593Smuzhiyun 			return -ENOMEM;
1424*4882a593Smuzhiyun 		}
1425*4882a593Smuzhiyun 	}
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	data->va = cpu_to_be64(evt->ext_list_token);
1430*4882a593Smuzhiyun 	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1431*4882a593Smuzhiyun 	data->key = 0;
1432*4882a593Smuzhiyun 	return 0;
1433*4882a593Smuzhiyun }
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun /**
1436*4882a593Smuzhiyun  * ibmvfc_timeout - Internal command timeout handler
1437*4882a593Smuzhiyun  * @t:	struct timer_list embedded in the ibmvfc_event that timed out
1438*4882a593Smuzhiyun  *
1439*4882a593Smuzhiyun  * Called when an internally generated command times out
1440*4882a593Smuzhiyun  **/
ibmvfc_timeout(struct timer_list * t)1441*4882a593Smuzhiyun static void ibmvfc_timeout(struct timer_list *t)
1442*4882a593Smuzhiyun {
1443*4882a593Smuzhiyun 	struct ibmvfc_event *evt = from_timer(evt, t, timer);
1444*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = evt->vhost;
1445*4882a593Smuzhiyun 	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1446*4882a593Smuzhiyun 	ibmvfc_reset_host(vhost);
1447*4882a593Smuzhiyun }
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun /**
1450*4882a593Smuzhiyun  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1451*4882a593Smuzhiyun  * @evt:		event to be sent
1452*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
1453*4882a593Smuzhiyun  * @timeout:	timeout in seconds - 0 means do not time command
1454*4882a593Smuzhiyun  *
1455*4882a593Smuzhiyun  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1456*4882a593Smuzhiyun  **/
static int ibmvfc_send_event(struct ibmvfc_event *evt,
			     struct ibmvfc_host *vhost, unsigned long timeout)
{
	/* The CRQ entry is sent as two big-endian 64-bit words */
	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
	int rc;

	/* Copy the IU into the transfer area */
	*evt->xfer_iu = evt->iu;
	/* Tag the IU with the event pointer so the response can be matched back */
	if (evt->crq.format == IBMVFC_CMD_FORMAT)
		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
	else
		BUG();

	/* Track the event as in-flight before handing it to firmware */
	list_add_tail(&evt->queue, &vhost->sent);
	timer_setup(&evt->timer, ibmvfc_timeout, 0);

	/* timeout of 0 means the command is not timed */
	if (timeout) {
		evt->timer.expires = jiffies + (timeout * HZ);
		add_timer(&evt->timer);
	}

	/* Make the IU writes visible before firmware can see the CRQ entry */
	mb();

	if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
				  be64_to_cpu(crq_as_u64[1])))) {
		/* Send failed: undo the in-flight bookkeeping above */
		list_del(&evt->queue);
		del_timer(&evt->timer);

		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
		 * Firmware will send a CRQ with a transport event (0xFF) to
		 * tell this client what has happened to the transport. This
		 * will be handled in ibmvfc_handle_crq()
		 */
		if (rc == H_CLOSED) {
			if (printk_ratelimit())
				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
			if (evt->cmnd)
				scsi_dma_unmap(evt->cmnd);
			ibmvfc_free_event(evt);
			return SCSI_MLQUEUE_HOST_BUSY;
		}

		/* Any other failure: complete the event with an error status */
		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
		if (evt->cmnd) {
			evt->cmnd->result = DID_ERROR << 16;
			evt->done = ibmvfc_scsi_eh_done;
		} else
			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);

		evt->done(evt);
	} else
		ibmvfc_trc_start(evt);

	return 0;
}
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun /**
1516*4882a593Smuzhiyun  * ibmvfc_log_error - Log an error for the failed command if appropriate
1517*4882a593Smuzhiyun  * @evt:	ibmvfc event to log
1518*4882a593Smuzhiyun  *
1519*4882a593Smuzhiyun  **/
ibmvfc_log_error(struct ibmvfc_event * evt)1520*4882a593Smuzhiyun static void ibmvfc_log_error(struct ibmvfc_event *evt)
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1523*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = evt->vhost;
1524*4882a593Smuzhiyun 	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
1525*4882a593Smuzhiyun 	struct scsi_cmnd *cmnd = evt->cmnd;
1526*4882a593Smuzhiyun 	const char *err = unknown_error;
1527*4882a593Smuzhiyun 	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1528*4882a593Smuzhiyun 	int logerr = 0;
1529*4882a593Smuzhiyun 	int rsp_code = 0;
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	if (index >= 0) {
1532*4882a593Smuzhiyun 		logerr = cmd_status[index].log;
1533*4882a593Smuzhiyun 		err = cmd_status[index].name;
1534*4882a593Smuzhiyun 	}
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1537*4882a593Smuzhiyun 		return;
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	if (rsp->flags & FCP_RSP_LEN_VALID)
1540*4882a593Smuzhiyun 		rsp_code = rsp->data.info.rsp_code;
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1543*4882a593Smuzhiyun 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1544*4882a593Smuzhiyun 		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1545*4882a593Smuzhiyun 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1546*4882a593Smuzhiyun }
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun /**
1549*4882a593Smuzhiyun  * ibmvfc_relogin - Log back into the specified device
1550*4882a593Smuzhiyun  * @sdev:	scsi device struct
1551*4882a593Smuzhiyun  *
1552*4882a593Smuzhiyun  **/
ibmvfc_relogin(struct scsi_device * sdev)1553*4882a593Smuzhiyun static void ibmvfc_relogin(struct scsi_device *sdev)
1554*4882a593Smuzhiyun {
1555*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
1556*4882a593Smuzhiyun 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1557*4882a593Smuzhiyun 	struct ibmvfc_target *tgt;
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	list_for_each_entry(tgt, &vhost->targets, queue) {
1560*4882a593Smuzhiyun 		if (rport == tgt->rport) {
1561*4882a593Smuzhiyun 			ibmvfc_del_tgt(tgt);
1562*4882a593Smuzhiyun 			break;
1563*4882a593Smuzhiyun 		}
1564*4882a593Smuzhiyun 	}
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	ibmvfc_reinit_host(vhost);
1567*4882a593Smuzhiyun }
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun /**
1570*4882a593Smuzhiyun  * ibmvfc_scsi_done - Handle responses from commands
1571*4882a593Smuzhiyun  * @evt:	ibmvfc event to be handled
1572*4882a593Smuzhiyun  *
1573*4882a593Smuzhiyun  * Used as a callback when sending scsi cmds.
1574*4882a593Smuzhiyun  **/
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
	struct scsi_cmnd *cmnd = evt->cmnd;
	u32 rsp_len = 0;
	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);

	if (cmnd) {
		/* Prefer the adapter's residual count when flagged valid,
		 * then the FCP underrun residual, else assume a full transfer.
		 */
		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
		else if (rsp->flags & FCP_RESID_UNDER)
			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
		else
			scsi_set_resid(cmnd, 0);

		if (vfc_cmd->status) {
			cmnd->result = ibmvfc_get_err_result(vfc_cmd);

			/* Sense data follows any FCP response data in the
			 * payload; clamp the copy to the midlayer's buffer.
			 */
			if (rsp->flags & FCP_RSP_LEN_VALID)
				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
			/* VIOS demands a new PLOGI: schedule a target re-login */
			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
				ibmvfc_relogin(cmnd->device);

			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
				cmnd->result = (DID_ERROR << 16);

			ibmvfc_log_error(evt);
		}

		/* Transferring less than the command's underflow is an error */
		if (!cmnd->result &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
			cmnd->result = (DID_ERROR << 16);

		scsi_dma_unmap(cmnd);
		cmnd->scsi_done(cmnd);
	}

	/* Wake an error-handler thread waiting on this command, if any */
	if (evt->eh_comp)
		complete(evt->eh_comp);

	ibmvfc_free_event(evt);
}
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun /**
1625*4882a593Smuzhiyun  * ibmvfc_host_chkready - Check if the host can accept commands
1626*4882a593Smuzhiyun  * @vhost:	 struct ibmvfc host
1627*4882a593Smuzhiyun  *
1628*4882a593Smuzhiyun  * Returns:
1629*4882a593Smuzhiyun  *	1 if host can accept command / 0 if not
1630*4882a593Smuzhiyun  **/
ibmvfc_host_chkready(struct ibmvfc_host * vhost)1631*4882a593Smuzhiyun static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1632*4882a593Smuzhiyun {
1633*4882a593Smuzhiyun 	int result = 0;
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	switch (vhost->state) {
1636*4882a593Smuzhiyun 	case IBMVFC_LINK_DEAD:
1637*4882a593Smuzhiyun 	case IBMVFC_HOST_OFFLINE:
1638*4882a593Smuzhiyun 		result = DID_NO_CONNECT << 16;
1639*4882a593Smuzhiyun 		break;
1640*4882a593Smuzhiyun 	case IBMVFC_NO_CRQ:
1641*4882a593Smuzhiyun 	case IBMVFC_INITIALIZING:
1642*4882a593Smuzhiyun 	case IBMVFC_HALTED:
1643*4882a593Smuzhiyun 	case IBMVFC_LINK_DOWN:
1644*4882a593Smuzhiyun 		result = DID_REQUEUE << 16;
1645*4882a593Smuzhiyun 		break;
1646*4882a593Smuzhiyun 	case IBMVFC_ACTIVE:
1647*4882a593Smuzhiyun 		result = 0;
1648*4882a593Smuzhiyun 		break;
1649*4882a593Smuzhiyun 	}
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	return result;
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun /**
1655*4882a593Smuzhiyun  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1656*4882a593Smuzhiyun  * @cmnd:	struct scsi_cmnd to be executed
1657*4882a593Smuzhiyun  * @done:	Callback function to be called when cmnd is completed
1658*4882a593Smuzhiyun  *
1659*4882a593Smuzhiyun  * Returns:
1660*4882a593Smuzhiyun  *	0 on success / other on failure
1661*4882a593Smuzhiyun  **/
static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
			       void (*done) (struct scsi_cmnd *))
{
	struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	struct ibmvfc_cmd *vfc_cmd;
	struct ibmvfc_event *evt;
	int rc;

	/* Fail/requeue immediately if the rport or host is not ready */
	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		cmnd->result = rc;
		done(cmnd);
		return 0;
	}

	cmnd->result = (DID_OK << 16);
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
	evt->cmnd = cmnd;
	cmnd->scsi_done = done;
	vfc_cmd = &evt->iu.cmd;
	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
	/* Response buffer lives at a fixed offset inside the transfer IU */
	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
	vfc_cmd->resp.len = cpu_to_be32(sizeof(vfc_cmd->rsp));
	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
	vfc_cmd->payload_len = cpu_to_be32(sizeof(vfc_cmd->iu));
	vfc_cmd->resp_len = cpu_to_be32(sizeof(vfc_cmd->rsp));
	/* Cancel key groups commands per-device for later cancellation */
	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)cmnd->device->hostdata);
	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
	vfc_cmd->iu.xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
	int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
	memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);

	if (cmnd->flags & SCMD_TAGGED) {
		vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
		vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
	}

	/* Map data buffers; on success hand the event to the firmware untimed */
	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
		return ibmvfc_send_event(evt, vhost, 0);

	ibmvfc_free_event(evt);
	if (rc == -ENOMEM)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
		scmd_printk(KERN_ERR, cmnd,
			    "Failed to map DMA buffer for command. rc=%d\n", rc);

	/* Unrecoverable mapping failure: complete the command with an error */
	cmnd->result = DID_ERROR << 16;
	done(cmnd);
	return 0;
}
1716*4882a593Smuzhiyun 
/* Generate the ibmvfc_queuecommand() wrapper around ibmvfc_queuecommand_lck() */
static DEF_SCSI_QCMD(ibmvfc_queuecommand)
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun /**
1720*4882a593Smuzhiyun  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1721*4882a593Smuzhiyun  * @evt:	ibmvfc event struct
1722*4882a593Smuzhiyun  *
1723*4882a593Smuzhiyun  **/
1724*4882a593Smuzhiyun static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1725*4882a593Smuzhiyun {
1726*4882a593Smuzhiyun 	/* copy the response back */
1727*4882a593Smuzhiyun 	if (evt->sync_iu)
1728*4882a593Smuzhiyun 		*evt->sync_iu = *evt->xfer_iu;
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 	complete(&evt->comp);
1731*4882a593Smuzhiyun }
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun /**
1734*4882a593Smuzhiyun  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1735*4882a593Smuzhiyun  * @evt:	struct ibmvfc_event
1736*4882a593Smuzhiyun  *
1737*4882a593Smuzhiyun  **/
ibmvfc_bsg_timeout_done(struct ibmvfc_event * evt)1738*4882a593Smuzhiyun static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1739*4882a593Smuzhiyun {
1740*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = evt->vhost;
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 	ibmvfc_free_event(evt);
1743*4882a593Smuzhiyun 	vhost->aborting_passthru = 0;
1744*4882a593Smuzhiyun 	dev_info(vhost->dev, "Passthru command cancelled\n");
1745*4882a593Smuzhiyun }
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun /**
1748*4882a593Smuzhiyun  * ibmvfc_bsg_timeout - Handle a BSG timeout
1749*4882a593Smuzhiyun  * @job:	struct bsg_job that timed out
1750*4882a593Smuzhiyun  *
1751*4882a593Smuzhiyun  * Returns:
1752*4882a593Smuzhiyun  *	0 on success / other on failure
1753*4882a593Smuzhiyun  **/
static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	unsigned long port_id = (unsigned long)job->dd_data;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* If a cancel is already in flight or the host isn't active,
	 * fall back to a full host reset to clear the passthru command.
	 */
	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
		__ibmvfc_reset_host(vhost);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	vhost->aborting_passthru = 1;
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);

	/* Build a TMF MAD that cancels commands sent with the passthru key */
	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	tmf->common.version = cpu_to_be32(1);
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(port_id);
	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc != 0) {
		/* Send failed: clear the flag so a later abort can proceed */
		vhost->aborting_passthru = 0;
		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
		rc = -EIO;
	} else
		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
			 port_id);

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	LEAVE;
	return rc;
}
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun /**
1800*4882a593Smuzhiyun  * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
1801*4882a593Smuzhiyun  * @vhost:		struct ibmvfc_host to send command
1802*4882a593Smuzhiyun  * @port_id:	port ID to send command
1803*4882a593Smuzhiyun  *
1804*4882a593Smuzhiyun  * Returns:
1805*4882a593Smuzhiyun  *	0 on success / other on failure
1806*4882a593Smuzhiyun  **/
static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
{
	struct ibmvfc_port_login *plogi;
	struct ibmvfc_target *tgt;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags;
	int rc = 0, issue_login = 1;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Skip the PLOGI if we already have a target for this port */
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == port_id) {
			issue_login = 0;
			break;
		}
	}

	if (!issue_login)
		goto unlock_out;
	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
		goto unlock_out;

	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	plogi = &evt->iu.plogi;
	memset(plogi, 0, sizeof(*plogi));
	plogi->common.version = cpu_to_be32(1);
	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
	plogi->common.length = cpu_to_be16(sizeof(*plogi));
	plogi->scsi_id = cpu_to_be64(port_id);
	/* Have the completion handler copy the response into rsp_iu */
	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);
	/* Drop the host lock while sleeping on the completion */
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc)
		return -EIO;

	wait_for_completion(&evt->comp);

	if (rsp_iu.plogi.common.status)
		rc = -EIO;

	/* Reacquire the lock to return the event to the pool */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
unlock_out:
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
	return rc;
}
1859*4882a593Smuzhiyun 
/**
 * ibmvfc_bsg_request - Handle a BSG request
 * @job:	struct bsg_job to be executed
 *
 * Translates an FC BSG ELS/CT request into an ibmvfc passthru MAD, DMA-maps
 * the request/reply payloads (each must be a single SG segment), optionally
 * performs a PLOGI first for host-initiated CT, and completes the job
 * synchronously. Serialized by vhost->passthru_mutex.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_request(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	struct fc_rport *rport = fc_bsg_to_rport(job);
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;	/* stack buffer for the synchronous response */
	unsigned long flags, port_id = -1;	/* -1 == no port ID resolved yet */
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	unsigned int code = bsg_request->msgcode;
	int rc = 0, req_seg, rsp_seg, issue_login = 0;
	u32 fc_flags, rsp_len;

	ENTER;
	bsg_reply->reply_payload_rcv_len = 0;
	if (rport)
		port_id = rport->port_id;

	/* Resolve the destination port ID and FC frame type from the msgcode */
	switch (code) {
	case FC_BSG_HST_ELS_NOLOGIN:
		/* 24-bit D_ID supplied as three bytes in the request */
		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
			bsg_request->rqst_data.h_els.port_id[2];
		fallthrough;
	case FC_BSG_RPT_ELS:
		fc_flags = IBMVFC_FC_ELS;
		break;
	case FC_BSG_HST_CT:
		/* Host-initiated CT needs a login to the port first */
		issue_login = 1;
		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
			bsg_request->rqst_data.h_ct.port_id[2];
		fallthrough;
	case FC_BSG_RPT_CT:
		fc_flags = IBMVFC_FC_CT_IU;
		break;
	default:
		return -ENOTSUPP;
	}

	if (port_id == -1)
		return -EINVAL;
	/* Only one passthru at a time; fail fast rather than block */
	if (!mutex_trylock(&vhost->passthru_mutex))
		return -EBUSY;

	/* Stash the port ID for the BSG timeout handler */
	job->dd_data = (void *)port_id;
	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!req_seg) {
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (!rsp_seg) {
		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	/* The passthru MAD carries exactly one descriptor per direction */
	if (req_seg > 1 || rsp_seg > 1) {
		rc = -EINVAL;
		goto out;
	}

	if (issue_login)
		rc = ibmvfc_bsg_plogi(vhost, port_id);

	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		goto out;
	}

	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	mad = &evt->iu.passthru;

	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
	/* MAD length excludes the trailing fc_iu and iu payload areas */
	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));

	/* Point the command descriptor at the iu embedded in this event */
	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, iu));
	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));

	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
	mad->iu.flags = cpu_to_be32(fc_flags);
	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);

	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
	mad->iu.scsi_id = cpu_to_be64(port_id);
	mad->iu.tag = cpu_to_be64((u64)evt);
	rsp_len = be32_to_cpu(mad->iu.rsp.len);

	evt->sync_iu = &rsp_iu;	/* completion handler copies the response here */
	init_completion(&evt->comp);
	rc = ibmvfc_send_event(evt, vhost, 0);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc) {
		rc = -EIO;
		goto out;
	}

	wait_for_completion(&evt->comp);

	if (rsp_iu.passthru.common.status)
		rc = -EIO;
	else
		bsg_reply->reply_payload_rcv_len = rsp_len;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	bsg_reply->result = rc;
	bsg_job_done(job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	/* Job was completed; 0 tells the BSG layer not to complete it again */
	rc = 0;
out:
	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	mutex_unlock(&vhost->passthru_mutex);
	LEAVE;
	return rc;
}
2007*4882a593Smuzhiyun 
/**
 * ibmvfc_reset_device - Reset the device with the specified reset type
 * @sdev:	scsi device to reset
 * @type:	reset type
 * @desc:	reset type description for log messages
 *
 * Builds a task management FCP command for @sdev carrying @type in the
 * TMF flags and waits synchronously for the response. Only attempted
 * while the host is IBMVFC_ACTIVE; otherwise fails with -EIO.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt = NULL;
	union ibmvfc_iu rsp_iu;	/* stack buffer for the synchronous response */
	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
	int rsp_rc = -EBUSY;	/* default if the host is not active */
	unsigned long flags;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(vhost);
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);

		/* Build a TMF command; no data descriptors are attached */
		tmf = &evt->iu.cmd;
		memset(tmf, 0, sizeof(*tmf));
		tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
		tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
		tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
		tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
		tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
		tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
		tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
		int_to_scsilun(sdev->lun, &tmf->iu.lun);
		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
		tmf->iu.tmf_flags = type;	/* caller-selected reset type */
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
			    desc, rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
	wait_for_completion(&evt->comp);

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);

	if (rsp_code) {
		/* Prefer the FCP response code when the target supplied one */
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun /**
2085*4882a593Smuzhiyun  * ibmvfc_match_rport - Match function for specified remote port
2086*4882a593Smuzhiyun  * @evt:	ibmvfc event struct
2087*4882a593Smuzhiyun  * @device:	device to match (rport)
2088*4882a593Smuzhiyun  *
2089*4882a593Smuzhiyun  * Returns:
2090*4882a593Smuzhiyun  *	1 if event matches rport / 0 if event does not match rport
2091*4882a593Smuzhiyun  **/
ibmvfc_match_rport(struct ibmvfc_event * evt,void * rport)2092*4882a593Smuzhiyun static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2093*4882a593Smuzhiyun {
2094*4882a593Smuzhiyun 	struct fc_rport *cmd_rport;
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	if (evt->cmnd) {
2097*4882a593Smuzhiyun 		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2098*4882a593Smuzhiyun 		if (cmd_rport == rport)
2099*4882a593Smuzhiyun 			return 1;
2100*4882a593Smuzhiyun 	}
2101*4882a593Smuzhiyun 	return 0;
2102*4882a593Smuzhiyun }
2103*4882a593Smuzhiyun 
2104*4882a593Smuzhiyun /**
2105*4882a593Smuzhiyun  * ibmvfc_match_target - Match function for specified target
2106*4882a593Smuzhiyun  * @evt:	ibmvfc event struct
2107*4882a593Smuzhiyun  * @device:	device to match (starget)
2108*4882a593Smuzhiyun  *
2109*4882a593Smuzhiyun  * Returns:
2110*4882a593Smuzhiyun  *	1 if event matches starget / 0 if event does not match starget
2111*4882a593Smuzhiyun  **/
ibmvfc_match_target(struct ibmvfc_event * evt,void * device)2112*4882a593Smuzhiyun static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2113*4882a593Smuzhiyun {
2114*4882a593Smuzhiyun 	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2115*4882a593Smuzhiyun 		return 1;
2116*4882a593Smuzhiyun 	return 0;
2117*4882a593Smuzhiyun }
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun /**
2120*4882a593Smuzhiyun  * ibmvfc_match_lun - Match function for specified LUN
2121*4882a593Smuzhiyun  * @evt:	ibmvfc event struct
2122*4882a593Smuzhiyun  * @device:	device to match (sdev)
2123*4882a593Smuzhiyun  *
2124*4882a593Smuzhiyun  * Returns:
2125*4882a593Smuzhiyun  *	1 if event matches sdev / 0 if event does not match sdev
2126*4882a593Smuzhiyun  **/
ibmvfc_match_lun(struct ibmvfc_event * evt,void * device)2127*4882a593Smuzhiyun static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2128*4882a593Smuzhiyun {
2129*4882a593Smuzhiyun 	if (evt->cmnd && evt->cmnd->device == device)
2130*4882a593Smuzhiyun 		return 1;
2131*4882a593Smuzhiyun 	return 0;
2132*4882a593Smuzhiyun }
2133*4882a593Smuzhiyun 
/**
 * ibmvfc_wait_for_ops - Wait for ops to complete
 * @vhost:	ibmvfc host struct
 * @device:	device to match (starget or sdev)
 * @match:	match function
 *
 * Attaches a shared completion to every sent event that @match selects
 * and waits (up to IBMVFC_ABORT_WAIT_TIMEOUT) for them to finish,
 * re-scanning the sent list until nothing matches. On timeout, the
 * completion pointers are detached again so completing events do not
 * touch this stack-allocated completion.
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
			       int (*match) (struct ibmvfc_event *, void *))
{
	struct ibmvfc_event *evt;
	DECLARE_COMPLETION_ONSTACK(comp);
	int wait;
	unsigned long flags;
	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;

	ENTER;
	do {
		wait = 0;
		spin_lock_irqsave(vhost->host->host_lock, flags);
		/* Hook our completion into every still-outstanding match */
		list_for_each_entry(evt, &vhost->sent, queue) {
			if (match(evt, device)) {
				evt->eh_comp = &comp;
				wait++;
			}
		}
		spin_unlock_irqrestore(vhost->host->host_lock, flags);

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				/* Timed out: unhook eh_comp before it goes out of scope */
				wait = 0;
				spin_lock_irqsave(vhost->host->host_lock, flags);
				list_for_each_entry(evt, &vhost->sent, queue) {
					if (match(evt, device)) {
						evt->eh_comp = NULL;
						wait++;
					}
				}
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (wait)
					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
				LEAVE;
				/* FAILED only if matching commands are still outstanding */
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	LEAVE;
	return SUCCESS;
}
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun /**
2190*4882a593Smuzhiyun  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2191*4882a593Smuzhiyun  * @sdev:	scsi device to cancel commands
2192*4882a593Smuzhiyun  * @type:	type of error recovery being performed
2193*4882a593Smuzhiyun  *
2194*4882a593Smuzhiyun  * This sends a cancel to the VIOS for the specified device. This does
2195*4882a593Smuzhiyun  * NOT send any abort to the actual device. That must be done separately.
2196*4882a593Smuzhiyun  *
2197*4882a593Smuzhiyun  * Returns:
2198*4882a593Smuzhiyun  *	0 on success / other on failure
2199*4882a593Smuzhiyun  **/
ibmvfc_cancel_all(struct scsi_device * sdev,int type)2200*4882a593Smuzhiyun static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2201*4882a593Smuzhiyun {
2202*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2203*4882a593Smuzhiyun 	struct scsi_target *starget = scsi_target(sdev);
2204*4882a593Smuzhiyun 	struct fc_rport *rport = starget_to_rport(starget);
2205*4882a593Smuzhiyun 	struct ibmvfc_tmf *tmf;
2206*4882a593Smuzhiyun 	struct ibmvfc_event *evt, *found_evt;
2207*4882a593Smuzhiyun 	union ibmvfc_iu rsp;
2208*4882a593Smuzhiyun 	int rsp_rc = -EBUSY;
2209*4882a593Smuzhiyun 	unsigned long flags;
2210*4882a593Smuzhiyun 	u16 status;
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 	ENTER;
2213*4882a593Smuzhiyun 	spin_lock_irqsave(vhost->host->host_lock, flags);
2214*4882a593Smuzhiyun 	found_evt = NULL;
2215*4882a593Smuzhiyun 	list_for_each_entry(evt, &vhost->sent, queue) {
2216*4882a593Smuzhiyun 		if (evt->cmnd && evt->cmnd->device == sdev) {
2217*4882a593Smuzhiyun 			found_evt = evt;
2218*4882a593Smuzhiyun 			break;
2219*4882a593Smuzhiyun 		}
2220*4882a593Smuzhiyun 	}
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	if (!found_evt) {
2223*4882a593Smuzhiyun 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2224*4882a593Smuzhiyun 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2225*4882a593Smuzhiyun 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2226*4882a593Smuzhiyun 		return 0;
2227*4882a593Smuzhiyun 	}
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun 	if (vhost->logged_in) {
2230*4882a593Smuzhiyun 		evt = ibmvfc_get_event(vhost);
2231*4882a593Smuzhiyun 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2232*4882a593Smuzhiyun 
2233*4882a593Smuzhiyun 		tmf = &evt->iu.tmf;
2234*4882a593Smuzhiyun 		memset(tmf, 0, sizeof(*tmf));
2235*4882a593Smuzhiyun 		tmf->common.version = cpu_to_be32(1);
2236*4882a593Smuzhiyun 		tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2237*4882a593Smuzhiyun 		tmf->common.length = cpu_to_be16(sizeof(*tmf));
2238*4882a593Smuzhiyun 		tmf->scsi_id = cpu_to_be64(rport->port_id);
2239*4882a593Smuzhiyun 		int_to_scsilun(sdev->lun, &tmf->lun);
2240*4882a593Smuzhiyun 		if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) & IBMVFC_CAN_SUPPRESS_ABTS))
2241*4882a593Smuzhiyun 			type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2242*4882a593Smuzhiyun 		if (vhost->state == IBMVFC_ACTIVE)
2243*4882a593Smuzhiyun 			tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
2244*4882a593Smuzhiyun 		else
2245*4882a593Smuzhiyun 			tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
2246*4882a593Smuzhiyun 		tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
2247*4882a593Smuzhiyun 		tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 		evt->sync_iu = &rsp;
2250*4882a593Smuzhiyun 		init_completion(&evt->comp);
2251*4882a593Smuzhiyun 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2252*4882a593Smuzhiyun 	}
2253*4882a593Smuzhiyun 
2254*4882a593Smuzhiyun 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 	if (rsp_rc != 0) {
2257*4882a593Smuzhiyun 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2258*4882a593Smuzhiyun 		/* If failure is received, the host adapter is most likely going
2259*4882a593Smuzhiyun 		 through reset, return success so the caller will wait for the command
2260*4882a593Smuzhiyun 		 being cancelled to get returned */
2261*4882a593Smuzhiyun 		return 0;
2262*4882a593Smuzhiyun 	}
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun 	wait_for_completion(&evt->comp);
2267*4882a593Smuzhiyun 	status = be16_to_cpu(rsp.mad_common.status);
2268*4882a593Smuzhiyun 	spin_lock_irqsave(vhost->host->host_lock, flags);
2269*4882a593Smuzhiyun 	ibmvfc_free_event(evt);
2270*4882a593Smuzhiyun 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2271*4882a593Smuzhiyun 
2272*4882a593Smuzhiyun 	if (status != IBMVFC_MAD_SUCCESS) {
2273*4882a593Smuzhiyun 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2274*4882a593Smuzhiyun 		switch (status) {
2275*4882a593Smuzhiyun 		case IBMVFC_MAD_DRIVER_FAILED:
2276*4882a593Smuzhiyun 		case IBMVFC_MAD_CRQ_ERROR:
2277*4882a593Smuzhiyun 			/* Host adapter most likely going through reset, return success to
2278*4882a593Smuzhiyun 			 the caller will wait for the command being cancelled to get returned */
2279*4882a593Smuzhiyun 			return 0;
2280*4882a593Smuzhiyun 		default:
2281*4882a593Smuzhiyun 			return -EIO;
2282*4882a593Smuzhiyun 		};
2283*4882a593Smuzhiyun 	}
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2286*4882a593Smuzhiyun 	return 0;
2287*4882a593Smuzhiyun }
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun /**
2290*4882a593Smuzhiyun  * ibmvfc_match_key - Match function for specified cancel key
2291*4882a593Smuzhiyun  * @evt:	ibmvfc event struct
2292*4882a593Smuzhiyun  * @key:	cancel key to match
2293*4882a593Smuzhiyun  *
2294*4882a593Smuzhiyun  * Returns:
2295*4882a593Smuzhiyun  *	1 if event matches key / 0 if event does not match key
2296*4882a593Smuzhiyun  **/
ibmvfc_match_key(struct ibmvfc_event * evt,void * key)2297*4882a593Smuzhiyun static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2298*4882a593Smuzhiyun {
2299*4882a593Smuzhiyun 	unsigned long cancel_key = (unsigned long)key;
2300*4882a593Smuzhiyun 
2301*4882a593Smuzhiyun 	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2302*4882a593Smuzhiyun 	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2303*4882a593Smuzhiyun 		return 1;
2304*4882a593Smuzhiyun 	return 0;
2305*4882a593Smuzhiyun }
2306*4882a593Smuzhiyun 
/**
 * ibmvfc_match_evt - Match function for specified event
 * @evt:	ibmvfc event struct
 * @match:	event to match
 *
 * Returns:
 *	1 if @evt is the matched event / 0 otherwise
 **/
static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
{
	return evt == match ? 1 : 0;
}
2321*4882a593Smuzhiyun 
/**
 * ibmvfc_abort_task_set - Abort outstanding commands to the device
 * @sdev:	scsi device to abort commands
 *
 * This sends an Abort Task Set to the VIOS for the specified device. This does
 * NOT send any cancel to the VIOS. That must be done separately.
 *
 * If the abort itself does not complete within IBMVFC_ABORT_TIMEOUT, this
 * escalates: cancel all commands, then reset the host, and finally hard
 * reset the host if the abort event still has not come back.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp_iu;	/* stack buffer for the synchronous response */
	struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
	int rc, rsp_rc = -EBUSY;	/* default if the host is not active */
	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Nothing to abort if no command for this device is outstanding */
	found_evt = NULL;
	list_for_each_entry(evt, &vhost->sent, queue) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(vhost);
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);

		/* Build an ABORT TASK SET TMF; no data descriptors attached */
		tmf = &evt->iu.cmd;
		memset(tmf, 0, sizeof(*tmf));
		tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
		tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
		tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
		tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
		tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
		tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
		tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
		int_to_scsilun(sdev->lun, &tmf->iu.lun);
		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
		tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
	timeout = wait_for_completion_timeout(&evt->comp, timeout);

	if (!timeout) {
		/* Abort timed out: escalate through cancel, then host reset */
		rc = ibmvfc_cancel_all(sdev, 0);
		if (!rc) {
			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
			if (rc == SUCCESS)
				rc = 0;
		}

		if (rc) {
			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
			ibmvfc_reset_host(vhost);
			rsp_rc = -EIO;
			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);

			if (rc == SUCCESS)
				rsp_rc = 0;

			/* The abort event itself must also be flushed out */
			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
			if (rc != SUCCESS) {
				spin_lock_irqsave(vhost->host->host_lock, flags);
				ibmvfc_hard_reset_host(vhost);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				rsp_rc = 0;
			}

			goto out;
		}
	}

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);

	if (rsp_code) {
		/* Prefer the FCP response code when the target supplied one */
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "Abort successful\n");

out:
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun /**
2445*4882a593Smuzhiyun  * ibmvfc_eh_abort_handler - Abort a command
2446*4882a593Smuzhiyun  * @cmd:	scsi command to abort
2447*4882a593Smuzhiyun  *
2448*4882a593Smuzhiyun  * Returns:
2449*4882a593Smuzhiyun  *	SUCCESS / FAST_IO_FAIL / FAILED
2450*4882a593Smuzhiyun  **/
ibmvfc_eh_abort_handler(struct scsi_cmnd * cmd)2451*4882a593Smuzhiyun static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2452*4882a593Smuzhiyun {
2453*4882a593Smuzhiyun 	struct scsi_device *sdev = cmd->device;
2454*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2455*4882a593Smuzhiyun 	int cancel_rc, block_rc;
2456*4882a593Smuzhiyun 	int rc = FAILED;
2457*4882a593Smuzhiyun 
2458*4882a593Smuzhiyun 	ENTER;
2459*4882a593Smuzhiyun 	block_rc = fc_block_scsi_eh(cmd);
2460*4882a593Smuzhiyun 	ibmvfc_wait_while_resetting(vhost);
2461*4882a593Smuzhiyun 	if (block_rc != FAST_IO_FAIL) {
2462*4882a593Smuzhiyun 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2463*4882a593Smuzhiyun 		ibmvfc_abort_task_set(sdev);
2464*4882a593Smuzhiyun 	} else
2465*4882a593Smuzhiyun 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2466*4882a593Smuzhiyun 
2467*4882a593Smuzhiyun 	if (!cancel_rc)
2468*4882a593Smuzhiyun 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2471*4882a593Smuzhiyun 		rc = FAST_IO_FAIL;
2472*4882a593Smuzhiyun 
2473*4882a593Smuzhiyun 	LEAVE;
2474*4882a593Smuzhiyun 	return rc;
2475*4882a593Smuzhiyun }
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun /**
2478*4882a593Smuzhiyun  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2479*4882a593Smuzhiyun  * @cmd:	scsi command struct
2480*4882a593Smuzhiyun  *
2481*4882a593Smuzhiyun  * Returns:
2482*4882a593Smuzhiyun  *	SUCCESS / FAST_IO_FAIL / FAILED
2483*4882a593Smuzhiyun  **/
ibmvfc_eh_device_reset_handler(struct scsi_cmnd * cmd)2484*4882a593Smuzhiyun static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2485*4882a593Smuzhiyun {
2486*4882a593Smuzhiyun 	struct scsi_device *sdev = cmd->device;
2487*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2488*4882a593Smuzhiyun 	int cancel_rc, block_rc, reset_rc = 0;
2489*4882a593Smuzhiyun 	int rc = FAILED;
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	ENTER;
2492*4882a593Smuzhiyun 	block_rc = fc_block_scsi_eh(cmd);
2493*4882a593Smuzhiyun 	ibmvfc_wait_while_resetting(vhost);
2494*4882a593Smuzhiyun 	if (block_rc != FAST_IO_FAIL) {
2495*4882a593Smuzhiyun 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2496*4882a593Smuzhiyun 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2497*4882a593Smuzhiyun 	} else
2498*4882a593Smuzhiyun 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2499*4882a593Smuzhiyun 
2500*4882a593Smuzhiyun 	if (!cancel_rc && !reset_rc)
2501*4882a593Smuzhiyun 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2502*4882a593Smuzhiyun 
2503*4882a593Smuzhiyun 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2504*4882a593Smuzhiyun 		rc = FAST_IO_FAIL;
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun 	LEAVE;
2507*4882a593Smuzhiyun 	return rc;
2508*4882a593Smuzhiyun }
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun /**
2511*4882a593Smuzhiyun  * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2512*4882a593Smuzhiyun  * @sdev:	scsi device struct
2513*4882a593Smuzhiyun  * @data:	return code
2514*4882a593Smuzhiyun  *
2515*4882a593Smuzhiyun  **/
ibmvfc_dev_cancel_all_noreset(struct scsi_device * sdev,void * data)2516*4882a593Smuzhiyun static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2517*4882a593Smuzhiyun {
2518*4882a593Smuzhiyun 	unsigned long *rc = data;
2519*4882a593Smuzhiyun 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2520*4882a593Smuzhiyun }
2521*4882a593Smuzhiyun 
2522*4882a593Smuzhiyun /**
2523*4882a593Smuzhiyun  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2524*4882a593Smuzhiyun  * @sdev:	scsi device struct
2525*4882a593Smuzhiyun  * @data:	return code
2526*4882a593Smuzhiyun  *
2527*4882a593Smuzhiyun  **/
ibmvfc_dev_cancel_all_reset(struct scsi_device * sdev,void * data)2528*4882a593Smuzhiyun static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2529*4882a593Smuzhiyun {
2530*4882a593Smuzhiyun 	unsigned long *rc = data;
2531*4882a593Smuzhiyun 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2532*4882a593Smuzhiyun }
2533*4882a593Smuzhiyun 
2534*4882a593Smuzhiyun /**
2535*4882a593Smuzhiyun  * ibmvfc_eh_target_reset_handler - Reset the target
2536*4882a593Smuzhiyun  * @cmd:	scsi command struct
2537*4882a593Smuzhiyun  *
2538*4882a593Smuzhiyun  * Returns:
2539*4882a593Smuzhiyun  *	SUCCESS / FAST_IO_FAIL / FAILED
2540*4882a593Smuzhiyun  **/
ibmvfc_eh_target_reset_handler(struct scsi_cmnd * cmd)2541*4882a593Smuzhiyun static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2542*4882a593Smuzhiyun {
2543*4882a593Smuzhiyun 	struct scsi_device *sdev = cmd->device;
2544*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2545*4882a593Smuzhiyun 	struct scsi_target *starget = scsi_target(sdev);
2546*4882a593Smuzhiyun 	int block_rc;
2547*4882a593Smuzhiyun 	int reset_rc = 0;
2548*4882a593Smuzhiyun 	int rc = FAILED;
2549*4882a593Smuzhiyun 	unsigned long cancel_rc = 0;
2550*4882a593Smuzhiyun 
2551*4882a593Smuzhiyun 	ENTER;
2552*4882a593Smuzhiyun 	block_rc = fc_block_scsi_eh(cmd);
2553*4882a593Smuzhiyun 	ibmvfc_wait_while_resetting(vhost);
2554*4882a593Smuzhiyun 	if (block_rc != FAST_IO_FAIL) {
2555*4882a593Smuzhiyun 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2556*4882a593Smuzhiyun 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2557*4882a593Smuzhiyun 	} else
2558*4882a593Smuzhiyun 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun 	if (!cancel_rc && !reset_rc)
2561*4882a593Smuzhiyun 		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2564*4882a593Smuzhiyun 		rc = FAST_IO_FAIL;
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun 	LEAVE;
2567*4882a593Smuzhiyun 	return rc;
2568*4882a593Smuzhiyun }
2569*4882a593Smuzhiyun 
2570*4882a593Smuzhiyun /**
2571*4882a593Smuzhiyun  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2572*4882a593Smuzhiyun  * @cmd:	struct scsi_cmnd having problems
2573*4882a593Smuzhiyun  *
2574*4882a593Smuzhiyun  **/
ibmvfc_eh_host_reset_handler(struct scsi_cmnd * cmd)2575*4882a593Smuzhiyun static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2576*4882a593Smuzhiyun {
2577*4882a593Smuzhiyun 	int rc;
2578*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2579*4882a593Smuzhiyun 
2580*4882a593Smuzhiyun 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2581*4882a593Smuzhiyun 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun 	return rc ? FAILED : SUCCESS;
2584*4882a593Smuzhiyun }
2585*4882a593Smuzhiyun 
/**
 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
 * @rport:		rport struct
 *
 * Cancels all outstanding commands on every scsi_device attached to
 * @rport, waits for them to complete, and escalates to an FC host LIP
 * if they do not. Also re-issues a pending implicit logout for the
 * matching target if a previous attempt failed.
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct fc_rport *dev_rport;
	struct scsi_device *sdev;
	struct ibmvfc_target *tgt;
	unsigned long rc, flags;
	unsigned int found;

	ENTER;
	/* Cancel outstanding commands on each device belonging to this rport */
	shost_for_each_device(sdev, shost) {
		dev_rport = starget_to_rport(scsi_target(sdev));
		if (dev_rport != rport)
			continue;
		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
	}

	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);

	/* Commands still outstanding: reset the link to flush them */
	if (rc == FAILED)
		ibmvfc_issue_fc_host_lip(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	found = 0;
	/* Find the target matching this rport's port ID; tgt stays valid
	 * past the loop only when found is set */
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == rport->port_id) {
			found++;
			break;
		}
	}

	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
		/*
		 * If we get here, that means we previously attempted to send
		 * an implicit logout to the target but it failed, most likely
		 * due to I/O being pending, so we need to send it again
		 */
		ibmvfc_del_tgt(tgt);
		ibmvfc_reinit_host(vhost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
	LEAVE;
}
2638*4882a593Smuzhiyun 
/*
 * Async event descriptor table: printable name, event code, and the log
 * level used when reporting the event. ELS and N-Port/Group SCN events
 * use IBMVFC_DEFAULT_LOG_LEVEL + 1 (a higher verbosity threshold).
 */
static const struct ibmvfc_async_desc ae_desc [] = {
	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};
2654*4882a593Smuzhiyun 
/* Fallback descriptor returned by ibmvfc_get_ae_desc() for unknown events */
static const struct ibmvfc_async_desc unknown_ae = {
	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
};
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun /**
2660*4882a593Smuzhiyun  * ibmvfc_get_ae_desc - Get text description for async event
2661*4882a593Smuzhiyun  * @ae:	async event
2662*4882a593Smuzhiyun  *
2663*4882a593Smuzhiyun  **/
ibmvfc_get_ae_desc(u64 ae)2664*4882a593Smuzhiyun static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
2665*4882a593Smuzhiyun {
2666*4882a593Smuzhiyun 	int i;
2667*4882a593Smuzhiyun 
2668*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2669*4882a593Smuzhiyun 		if (ae_desc[i].ae == ae)
2670*4882a593Smuzhiyun 			return &ae_desc[i];
2671*4882a593Smuzhiyun 
2672*4882a593Smuzhiyun 	return &unknown_ae;
2673*4882a593Smuzhiyun }
2674*4882a593Smuzhiyun 
/* Mapping from link state codes to printable suffixes for event logging */
static const struct {
	enum ibmvfc_ae_link_state state;
	const char *desc;
} link_desc [] = {
	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
};
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun /**
2686*4882a593Smuzhiyun  * ibmvfc_get_link_state - Get text description for link state
2687*4882a593Smuzhiyun  * @state:	link state
2688*4882a593Smuzhiyun  *
2689*4882a593Smuzhiyun  **/
ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)2690*4882a593Smuzhiyun static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
2691*4882a593Smuzhiyun {
2692*4882a593Smuzhiyun 	int i;
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(link_desc); i++)
2695*4882a593Smuzhiyun 		if (link_desc[i].state == state)
2696*4882a593Smuzhiyun 			return link_desc[i].desc;
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 	return "";
2699*4882a593Smuzhiyun }
2700*4882a593Smuzhiyun 
/**
 * ibmvfc_handle_async - Handle an async event from the adapter
 * @crq:	crq to process
 * @vhost:	ibmvfc host struct
 *
 * Logs the event, then reacts to it: link/resume events reset or down
 * the host, SCN events trigger re-discovery, and ELS events delete and
 * re-login the matching target(s).
 **/
static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
				struct ibmvfc_host *vhost)
{
	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
	struct ibmvfc_target *tgt;

	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
		   ibmvfc_get_link_state(crq->link_state));

	switch (be64_to_cpu(crq->event)) {
	case IBMVFC_AE_RESUME:
		/* Adapter resumed: action depends on the reported link state */
		switch (crq->link_state) {
		case IBMVFC_AE_LS_LINK_DOWN:
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			break;
		case IBMVFC_AE_LS_LINK_DEAD:
			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
			break;
		case IBMVFC_AE_LS_LINK_UP:
		case IBMVFC_AE_LS_LINK_BOUNCED:
		default:
			/* Link is back (or state unknown): delayed host reset */
			vhost->events_to_log |= IBMVFC_AE_LINKUP;
			vhost->delay_init = 1;
			__ibmvfc_reset_host(vhost);
			break;
		}

		break;
	case IBMVFC_AE_LINK_UP:
		vhost->events_to_log |= IBMVFC_AE_LINKUP;
		vhost->delay_init = 1;
		__ibmvfc_reset_host(vhost);
		break;
	case IBMVFC_AE_SCN_FABRIC:
	case IBMVFC_AE_SCN_DOMAIN:
		/* Fabric/domain-wide change: full reset unless already halted */
		vhost->events_to_log |= IBMVFC_AE_RSCN;
		if (vhost->state < IBMVFC_HALTED) {
			vhost->delay_init = 1;
			__ibmvfc_reset_host(vhost);
		}
		break;
	case IBMVFC_AE_SCN_NPORT:
	case IBMVFC_AE_SCN_GROUP:
		/* Narrower change: re-discovery without a full host reset */
		vhost->events_to_log |= IBMVFC_AE_RSCN;
		ibmvfc_reinit_host(vhost);
		break;
	case IBMVFC_AE_ELS_LOGO:
	case IBMVFC_AE_ELS_PRLO:
	case IBMVFC_AE_ELS_PLOGI:
		/* Match the event against known targets. A zero field is a
		 * wildcard; if all three identifiers are zero there is
		 * nothing to match and we bail out of the loop. */
		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
				break;
			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
				continue;
			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
				continue;
			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
				continue;
			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
				tgt->logo_rcvd = 1;
			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
				ibmvfc_del_tgt(tgt);
				ibmvfc_reinit_host(vhost);
			}
		}
		break;
	case IBMVFC_AE_LINK_DOWN:
	case IBMVFC_AE_ADAPTER_FAILED:
		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
		break;
	case IBMVFC_AE_LINK_DEAD:
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		break;
	case IBMVFC_AE_HALT:
		ibmvfc_link_down(vhost, IBMVFC_HALTED);
		break;
	default:
		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
		break;
	}
}
2790*4882a593Smuzhiyun 
/**
 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
 * @crq:	Command/Response queue
 * @vhost:	ibmvfc host struct
 *
 * Dispatches on the CRQ entry type: init handshake messages, transport
 * events (migration, partner failure), or command responses, which are
 * validated and completed via the event's done() callback.
 **/
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
{
	long rc;
	/* ioba carries back the correlation token we sent: our event pointer */
	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);

	switch (crq->valid) {
	case IBMVFC_CRQ_INIT_RSP:
		switch (crq->format) {
		case IBMVFC_CRQ_INIT:
			dev_info(vhost->dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvfc_send_crq_init_complete(vhost);
			if (rc == 0)
				ibmvfc_init_host(vhost);
			else
				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
			break;
		case IBMVFC_CRQ_INIT_COMPLETE:
			dev_info(vhost->dev, "Partner initialization complete\n");
			ibmvfc_init_host(vhost);
			break;
		default:
			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_XPORT_EVENT:
		/* Transport-level event: connection to the partner is gone */
		vhost->state = IBMVFC_NO_CRQ;
		vhost->logged_in = 0;
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
			/* We need to re-setup the interpartition connection */
			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
			vhost->client_migrated = 1;

			scsi_block_requests(vhost->host);
			ibmvfc_purge_requests(vhost, DID_REQUEUE);
			ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
			wake_up(&vhost->work_wait_q);
		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
			ibmvfc_purge_requests(vhost, DID_ERROR);
			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
		} else {
			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
		}
		return;
	case IBMVFC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
		return;
	}

	if (crq->format == IBMVFC_ASYNC_EVENT)
		return;

	/* The only kind of payload CRQs we should get are responses to
	 * things we send. Make sure this response is to something we
	 * actually sent
	 */
	if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
			crq->ioba);
		return;
	}

	if (unlikely(atomic_read(&evt->free))) {
		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
			crq->ioba);
		return;
	}

	/* Valid response: stop the timeout timer and complete the event */
	del_timer(&evt->timer);
	list_del(&evt->queue);
	ibmvfc_trc_end(evt);
	evt->done(evt);
}
2876*4882a593Smuzhiyun 
2877*4882a593Smuzhiyun /**
2878*4882a593Smuzhiyun  * ibmvfc_scan_finished - Check if the device scan is done.
2879*4882a593Smuzhiyun  * @shost:	scsi host struct
2880*4882a593Smuzhiyun  * @time:	current elapsed time
2881*4882a593Smuzhiyun  *
2882*4882a593Smuzhiyun  * Returns:
2883*4882a593Smuzhiyun  *	0 if scan is not done / 1 if scan is done
2884*4882a593Smuzhiyun  **/
ibmvfc_scan_finished(struct Scsi_Host * shost,unsigned long time)2885*4882a593Smuzhiyun static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2886*4882a593Smuzhiyun {
2887*4882a593Smuzhiyun 	unsigned long flags;
2888*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
2889*4882a593Smuzhiyun 	int done = 0;
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 	spin_lock_irqsave(shost->host_lock, flags);
2892*4882a593Smuzhiyun 	if (time >= (init_timeout * HZ)) {
2893*4882a593Smuzhiyun 		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
2894*4882a593Smuzhiyun 			 "continuing initialization\n", init_timeout);
2895*4882a593Smuzhiyun 		done = 1;
2896*4882a593Smuzhiyun 	}
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 	if (vhost->scan_complete)
2899*4882a593Smuzhiyun 		done = 1;
2900*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
2901*4882a593Smuzhiyun 	return done;
2902*4882a593Smuzhiyun }
2903*4882a593Smuzhiyun 
2904*4882a593Smuzhiyun /**
2905*4882a593Smuzhiyun  * ibmvfc_slave_alloc - Setup the device's task set value
2906*4882a593Smuzhiyun  * @sdev:	struct scsi_device device to configure
2907*4882a593Smuzhiyun  *
2908*4882a593Smuzhiyun  * Set the device's task set value so that error handling works as
2909*4882a593Smuzhiyun  * expected.
2910*4882a593Smuzhiyun  *
2911*4882a593Smuzhiyun  * Returns:
2912*4882a593Smuzhiyun  *	0 on success / -ENXIO if device does not exist
2913*4882a593Smuzhiyun  **/
ibmvfc_slave_alloc(struct scsi_device * sdev)2914*4882a593Smuzhiyun static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2915*4882a593Smuzhiyun {
2916*4882a593Smuzhiyun 	struct Scsi_Host *shost = sdev->host;
2917*4882a593Smuzhiyun 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2918*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
2919*4882a593Smuzhiyun 	unsigned long flags = 0;
2920*4882a593Smuzhiyun 
2921*4882a593Smuzhiyun 	if (!rport || fc_remote_port_chkready(rport))
2922*4882a593Smuzhiyun 		return -ENXIO;
2923*4882a593Smuzhiyun 
2924*4882a593Smuzhiyun 	spin_lock_irqsave(shost->host_lock, flags);
2925*4882a593Smuzhiyun 	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
2926*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
2927*4882a593Smuzhiyun 	return 0;
2928*4882a593Smuzhiyun }
2929*4882a593Smuzhiyun 
2930*4882a593Smuzhiyun /**
2931*4882a593Smuzhiyun  * ibmvfc_target_alloc - Setup the target's task set value
2932*4882a593Smuzhiyun  * @starget:	struct scsi_target
2933*4882a593Smuzhiyun  *
2934*4882a593Smuzhiyun  * Set the target's task set value so that error handling works as
2935*4882a593Smuzhiyun  * expected.
2936*4882a593Smuzhiyun  *
2937*4882a593Smuzhiyun  * Returns:
2938*4882a593Smuzhiyun  *	0 on success / -ENXIO if device does not exist
2939*4882a593Smuzhiyun  **/
ibmvfc_target_alloc(struct scsi_target * starget)2940*4882a593Smuzhiyun static int ibmvfc_target_alloc(struct scsi_target *starget)
2941*4882a593Smuzhiyun {
2942*4882a593Smuzhiyun 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2943*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
2944*4882a593Smuzhiyun 	unsigned long flags = 0;
2945*4882a593Smuzhiyun 
2946*4882a593Smuzhiyun 	spin_lock_irqsave(shost->host_lock, flags);
2947*4882a593Smuzhiyun 	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
2948*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
2949*4882a593Smuzhiyun 	return 0;
2950*4882a593Smuzhiyun }
2951*4882a593Smuzhiyun 
2952*4882a593Smuzhiyun /**
2953*4882a593Smuzhiyun  * ibmvfc_slave_configure - Configure the device
2954*4882a593Smuzhiyun  * @sdev:	struct scsi_device device to configure
2955*4882a593Smuzhiyun  *
2956*4882a593Smuzhiyun  * Enable allow_restart for a device if it is a disk. Adjust the
2957*4882a593Smuzhiyun  * queue_depth here also.
2958*4882a593Smuzhiyun  *
2959*4882a593Smuzhiyun  * Returns:
2960*4882a593Smuzhiyun  *	0
2961*4882a593Smuzhiyun  **/
ibmvfc_slave_configure(struct scsi_device * sdev)2962*4882a593Smuzhiyun static int ibmvfc_slave_configure(struct scsi_device *sdev)
2963*4882a593Smuzhiyun {
2964*4882a593Smuzhiyun 	struct Scsi_Host *shost = sdev->host;
2965*4882a593Smuzhiyun 	unsigned long flags = 0;
2966*4882a593Smuzhiyun 
2967*4882a593Smuzhiyun 	spin_lock_irqsave(shost->host_lock, flags);
2968*4882a593Smuzhiyun 	if (sdev->type == TYPE_DISK) {
2969*4882a593Smuzhiyun 		sdev->allow_restart = 1;
2970*4882a593Smuzhiyun 		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
2971*4882a593Smuzhiyun 	}
2972*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
2973*4882a593Smuzhiyun 	return 0;
2974*4882a593Smuzhiyun }
2975*4882a593Smuzhiyun 
2976*4882a593Smuzhiyun /**
2977*4882a593Smuzhiyun  * ibmvfc_change_queue_depth - Change the device's queue depth
2978*4882a593Smuzhiyun  * @sdev:	scsi device struct
2979*4882a593Smuzhiyun  * @qdepth:	depth to set
2980*4882a593Smuzhiyun  * @reason:	calling context
2981*4882a593Smuzhiyun  *
2982*4882a593Smuzhiyun  * Return value:
2983*4882a593Smuzhiyun  * 	actual depth set
2984*4882a593Smuzhiyun  **/
ibmvfc_change_queue_depth(struct scsi_device * sdev,int qdepth)2985*4882a593Smuzhiyun static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
2986*4882a593Smuzhiyun {
2987*4882a593Smuzhiyun 	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
2988*4882a593Smuzhiyun 		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
2989*4882a593Smuzhiyun 
2990*4882a593Smuzhiyun 	return scsi_change_queue_depth(sdev, qdepth);
2991*4882a593Smuzhiyun }
2992*4882a593Smuzhiyun 
ibmvfc_show_host_partition_name(struct device * dev,struct device_attribute * attr,char * buf)2993*4882a593Smuzhiyun static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
2994*4882a593Smuzhiyun 						 struct device_attribute *attr, char *buf)
2995*4882a593Smuzhiyun {
2996*4882a593Smuzhiyun 	struct Scsi_Host *shost = class_to_shost(dev);
2997*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
2998*4882a593Smuzhiyun 
2999*4882a593Smuzhiyun 	return snprintf(buf, PAGE_SIZE, "%s\n",
3000*4882a593Smuzhiyun 			vhost->login_buf->resp.partition_name);
3001*4882a593Smuzhiyun }
3002*4882a593Smuzhiyun 
ibmvfc_show_host_device_name(struct device * dev,struct device_attribute * attr,char * buf)3003*4882a593Smuzhiyun static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3004*4882a593Smuzhiyun 					    struct device_attribute *attr, char *buf)
3005*4882a593Smuzhiyun {
3006*4882a593Smuzhiyun 	struct Scsi_Host *shost = class_to_shost(dev);
3007*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
3008*4882a593Smuzhiyun 
3009*4882a593Smuzhiyun 	return snprintf(buf, PAGE_SIZE, "%s\n",
3010*4882a593Smuzhiyun 			vhost->login_buf->resp.device_name);
3011*4882a593Smuzhiyun }
3012*4882a593Smuzhiyun 
ibmvfc_show_host_loc_code(struct device * dev,struct device_attribute * attr,char * buf)3013*4882a593Smuzhiyun static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3014*4882a593Smuzhiyun 					 struct device_attribute *attr, char *buf)
3015*4882a593Smuzhiyun {
3016*4882a593Smuzhiyun 	struct Scsi_Host *shost = class_to_shost(dev);
3017*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
3018*4882a593Smuzhiyun 
3019*4882a593Smuzhiyun 	return snprintf(buf, PAGE_SIZE, "%s\n",
3020*4882a593Smuzhiyun 			vhost->login_buf->resp.port_loc_code);
3021*4882a593Smuzhiyun }
3022*4882a593Smuzhiyun 
ibmvfc_show_host_drc_name(struct device * dev,struct device_attribute * attr,char * buf)3023*4882a593Smuzhiyun static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3024*4882a593Smuzhiyun 					 struct device_attribute *attr, char *buf)
3025*4882a593Smuzhiyun {
3026*4882a593Smuzhiyun 	struct Scsi_Host *shost = class_to_shost(dev);
3027*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun 	return snprintf(buf, PAGE_SIZE, "%s\n",
3030*4882a593Smuzhiyun 			vhost->login_buf->resp.drc_name);
3031*4882a593Smuzhiyun }
3032*4882a593Smuzhiyun 
/**
 * ibmvfc_show_host_npiv_version - Show the NPIV version from the login response
 * @dev:	class device struct
 * @attr:	device attribute struct
 * @buf:	sysfs output buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
					     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	/* NOTE(review): resp.version is printed raw; if the field is __be32 in
	 * ibmvfc.h this needs be32_to_cpu() on little-endian hosts - confirm */
	return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
}
3040*4882a593Smuzhiyun 
/**
 * ibmvfc_show_host_capabilities - Show the capability bits from the login response
 * @dev:	class device struct
 * @attr:	device attribute struct
 * @buf:	sysfs output buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
					     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	/* NOTE(review): capabilities is printed raw with %llx; if the field is
	 * __be64 in ibmvfc.h this needs be64_to_cpu() on little-endian - confirm */
	return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
}
3048*4882a593Smuzhiyun 
3049*4882a593Smuzhiyun /**
3050*4882a593Smuzhiyun  * ibmvfc_show_log_level - Show the adapter's error logging level
3051*4882a593Smuzhiyun  * @dev:	class device struct
3052*4882a593Smuzhiyun  * @buf:	buffer
3053*4882a593Smuzhiyun  *
3054*4882a593Smuzhiyun  * Return value:
3055*4882a593Smuzhiyun  * 	number of bytes printed to buffer
3056*4882a593Smuzhiyun  **/
ibmvfc_show_log_level(struct device * dev,struct device_attribute * attr,char * buf)3057*4882a593Smuzhiyun static ssize_t ibmvfc_show_log_level(struct device *dev,
3058*4882a593Smuzhiyun 				     struct device_attribute *attr, char *buf)
3059*4882a593Smuzhiyun {
3060*4882a593Smuzhiyun 	struct Scsi_Host *shost = class_to_shost(dev);
3061*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = shost_priv(shost);
3062*4882a593Smuzhiyun 	unsigned long flags = 0;
3063*4882a593Smuzhiyun 	int len;
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun 	spin_lock_irqsave(shost->host_lock, flags);
3066*4882a593Smuzhiyun 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3067*4882a593Smuzhiyun 	spin_unlock_irqrestore(shost->host_lock, flags);
3068*4882a593Smuzhiyun 	return len;
3069*4882a593Smuzhiyun }
3070*4882a593Smuzhiyun 
/**
 * ibmvfc_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @attr:	device attribute struct
 * @buf:	user supplied value string (sysfs guarantees NUL termination)
 * @count:	number of bytes written to the attribute
 *
 * The new level is stored under the host lock to serialize against
 * ibmvfc_show_log_level().
 *
 * Return value:
 * 	number of bytes consumed from buffer
 **/
static ssize_t ibmvfc_store_log_level(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	/* NOTE(review): simple_strtoul() silently ignores trailing garbage
	 * and parse errors (value becomes 0); kstrtoul() would be stricter,
	 * but existing behavior is kept as-is */
	vhost->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* consume the whole (NUL-terminated) input rather than @count */
	return strlen(buf);
}
3092*4882a593Smuzhiyun 
3093*4882a593Smuzhiyun static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
3094*4882a593Smuzhiyun static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
3095*4882a593Smuzhiyun static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
3096*4882a593Smuzhiyun static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
3097*4882a593Smuzhiyun static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
3098*4882a593Smuzhiyun static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
3099*4882a593Smuzhiyun static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
3100*4882a593Smuzhiyun 		   ibmvfc_show_log_level, ibmvfc_store_log_level);
3101*4882a593Smuzhiyun 
3102*4882a593Smuzhiyun #ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_read_trace - Dump the adapter trace
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:		buffer
 * @off:		offset into the trace buffer
 * @count:		buffer size
 *
 * Copies a window of the adapter trace buffer out through the "trace"
 * sysfs binary attribute.  Reads at or past IBMVFC_TRACE_SIZE return 0
 * (EOF); reads crossing the end are truncated.
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;
	int size = IBMVFC_TRACE_SIZE;
	char *src = (char *)vhost->trace;

	if (off > size)
		return 0;
	/* clamp the request to the end of the trace buffer */
	if (off + count > size) {
		size -= off;
		count = size;
	}

	/* host lock keeps the snapshot stable against concurrent trace writes */
	spin_lock_irqsave(shost->host_lock, flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return count;
}
3138*4882a593Smuzhiyun 
/* Read-only sysfs binary attribute exposing the adapter trace buffer.
 * .size = 0 advertises no fixed file size; ibmvfc_read_trace() bounds
 * each read itself. */
static struct bin_attribute ibmvfc_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ibmvfc_read_trace,
};
3147*4882a593Smuzhiyun #endif
3148*4882a593Smuzhiyun 
/* NULL-terminated list of host sysfs attributes; hooked into the SCSI
 * midlayer through driver_template.shost_attrs below. */
static struct device_attribute *ibmvfc_attrs[] = {
	&dev_attr_partition_name,
	&dev_attr_device_name,
	&dev_attr_port_loc_code,
	&dev_attr_drc_name,
	&dev_attr_npiv_version,
	&dev_attr_capabilities,
	&dev_attr_log_level,
	NULL
};
3159*4882a593Smuzhiyun 
/* SCSI midlayer host template: command submission, error-handler entry
 * points and queueing limits for ibmvfc virtual FC hosts. */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual FC Adapter",
	.proc_name = IBMVFC_NAME,
	.queuecommand = ibmvfc_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = ibmvfc_eh_abort_handler,
	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
	.slave_alloc = ibmvfc_slave_alloc,
	.slave_configure = ibmvfc_slave_configure,
	.target_alloc = ibmvfc_target_alloc,
	.scan_finished = ibmvfc_scan_finished,
	.change_queue_depth = ibmvfc_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = IBMVFC_MAX_SECTORS,
	.shost_attrs = ibmvfc_attrs,
	.track_queue_depth = 1,
};
3183*4882a593Smuzhiyun 
/**
 * ibmvfc_next_async_crq - Returns the next entry in async queue
 * @vhost:	ibmvfc host struct
 *
 * Consumes one entry from the circular async CRQ: if the current slot's
 * valid bit (0x80) is set, the cursor is advanced (wrapping at ->size)
 * and the entry is returned; the caller is responsible for clearing
 * ->valid when done with it.
 *
 * Returns:
 *	Pointer to next entry in queue / NULL if empty
 **/
static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
	struct ibmvfc_async_crq *crq;

	crq = &async_crq->msgs[async_crq->cur];
	if (crq->valid & 0x80) {
		if (++async_crq->cur == async_crq->size)
			async_crq->cur = 0;
		/* order the valid-bit check before the caller's reads of the
		 * entry payload written by the hypervisor */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3206*4882a593Smuzhiyun 
/**
 * ibmvfc_next_crq - Returns the next entry in message queue
 * @vhost:	ibmvfc host struct
 *
 * Consumes one entry from the circular CRQ: if the current slot's valid
 * bit (0x80) is set, the cursor is advanced (wrapping at ->size) and the
 * entry is returned; the caller clears ->valid once it has handled it.
 *
 * Returns:
 *	Pointer to next entry in queue / NULL if empty
 **/
static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
{
	struct ibmvfc_crq_queue *queue = &vhost->crq;
	struct ibmvfc_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
		/* order the valid-bit check before the caller's reads of the
		 * entry payload written by the hypervisor */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3229*4882a593Smuzhiyun 
/**
 * ibmvfc_interrupt - Interrupt handler
 * @irq:		number of irq to handle, not used
 * @dev_instance: ibmvfc_host that received interrupt
 *
 * Masks further VIO interrupts and defers all CRQ processing to
 * ibmvfc_tasklet(), which re-enables interrupts once both queues
 * have been drained.
 *
 * Returns:
 *	IRQ_HANDLED
 **/
static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
{
	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	vio_disable_interrupts(to_vio_dev(vhost->dev));
	tasklet_schedule(&vhost->tasklet);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return IRQ_HANDLED;
}
3249*4882a593Smuzhiyun 
/**
 * ibmvfc_tasklet - Interrupt handler tasklet
 * @data:		ibmvfc host struct
 *
 * Drains the async CRQ and the command CRQ under the host lock.  After
 * draining, interrupts are re-enabled and both queues are checked once
 * more: an entry that arrived between the last drain pass and
 * vio_enable_interrupts() would otherwise sit in the queue with no
 * interrupt pending.  Only when that re-check finds both queues empty
 * does the loop terminate.
 *
 * Returns:
 *	Nothing
 **/
static void ibmvfc_tasklet(void *data)
{
	struct ibmvfc_host *vhost = data;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq *crq;
	struct ibmvfc_async_crq *async;
	unsigned long flags;
	int done = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	while (!done) {
		/* Pull all the valid messages off the async CRQ */
		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			ibmvfc_handle_async(async, vhost);
			async->valid = 0;
			/* publish the freed slot before processing more */
			wmb();
		}

		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			ibmvfc_handle_crq(crq, vhost);
			crq->valid = 0;
			wmb();
		}

		/* close the race: re-enable interrupts, then look again */
		vio_enable_interrupts(vdev);
		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_async(async, vhost);
			async->valid = 0;
			wmb();
		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvfc_handle_crq(crq, vhost);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
3299*4882a593Smuzhiyun 
/**
 * ibmvfc_init_tgt - Set the next init job step for the target
 * @tgt:		ibmvfc target struct
 * @job_step:	job step to perform
 *
 * Moves the target to IBMVFC_TGT_ACTION_INIT and records @job_step for
 * the worker thread to run; the step is only installed when the action
 * transition is permitted (ibmvfc_set_tgt_action() returns 0).  The
 * worker is woken either way.
 **/
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
			    void (*job_step) (struct ibmvfc_target *))
{
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
		tgt->job_step = job_step;
	wake_up(&tgt->vhost->work_wait_q);
}
3313*4882a593Smuzhiyun 
3314*4882a593Smuzhiyun /**
3315*4882a593Smuzhiyun  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3316*4882a593Smuzhiyun  * @tgt:		ibmvfc target struct
3317*4882a593Smuzhiyun  * @job_step:	initialization job step
3318*4882a593Smuzhiyun  *
3319*4882a593Smuzhiyun  * Returns: 1 if step will be retried / 0 if not
3320*4882a593Smuzhiyun  *
3321*4882a593Smuzhiyun  **/
ibmvfc_retry_tgt_init(struct ibmvfc_target * tgt,void (* job_step)(struct ibmvfc_target *))3322*4882a593Smuzhiyun static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3323*4882a593Smuzhiyun 				  void (*job_step) (struct ibmvfc_target *))
3324*4882a593Smuzhiyun {
3325*4882a593Smuzhiyun 	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3326*4882a593Smuzhiyun 		ibmvfc_del_tgt(tgt);
3327*4882a593Smuzhiyun 		wake_up(&tgt->vhost->work_wait_q);
3328*4882a593Smuzhiyun 		return 0;
3329*4882a593Smuzhiyun 	} else
3330*4882a593Smuzhiyun 		ibmvfc_init_tgt(tgt, job_step);
3331*4882a593Smuzhiyun 	return 1;
3332*4882a593Smuzhiyun }
3333*4882a593Smuzhiyun 
/* Defined in FC-LS */
/* PRLI accept response code dispositions, indexed by ibmvfc_get_prli_rsp()
 * and consumed in ibmvfc_tgt_prli_done(). */
static const struct {
	int code;	/* FC-LS PRLI response code */
	int retry;	/* non-zero: PRLI should be retried */
	int logged_in;	/* non-zero: login may proceed for this code */
} prli_rsp [] = {
	{ 0, 1, 0 },
	{ 1, 0, 1 },
	{ 2, 1, 0 },
	{ 3, 1, 0 },
	{ 4, 0, 0 },
	{ 5, 0, 0 },
	{ 6, 0, 1 },
	{ 7, 0, 0 },
	{ 8, 1, 0 },
};
3350*4882a593Smuzhiyun 
3351*4882a593Smuzhiyun /**
3352*4882a593Smuzhiyun  * ibmvfc_get_prli_rsp - Find PRLI response index
3353*4882a593Smuzhiyun  * @flags:	PRLI response flags
3354*4882a593Smuzhiyun  *
3355*4882a593Smuzhiyun  **/
ibmvfc_get_prli_rsp(u16 flags)3356*4882a593Smuzhiyun static int ibmvfc_get_prli_rsp(u16 flags)
3357*4882a593Smuzhiyun {
3358*4882a593Smuzhiyun 	int i;
3359*4882a593Smuzhiyun 	int code = (flags & 0x0f00) >> 8;
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3362*4882a593Smuzhiyun 		if (prli_rsp[i].code == code)
3363*4882a593Smuzhiyun 			return i;
3364*4882a593Smuzhiyun 
3365*4882a593Smuzhiyun 	return 0;
3366*4882a593Smuzhiyun }
3367*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_prli_done - Completion handler for Process Login
 * @evt:	ibmvfc event struct
 *
 * On MAD success the FC-LS PRLI response code (via prli_rsp[]) decides
 * whether the target is logged in, should be retried, or deleted; on
 * failure the VIOS status/error pair selects a retry with PLOGI, a PRLI
 * retry, or deletion.  Always releases the discovery thread slot, the
 * target reference taken by ibmvfc_tgt_send_prli(), frees the event and
 * wakes the worker thread.
 **/
static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
	u32 status = be16_to_cpu(rsp->common.status);
	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
			parms->type, parms->flags, parms->service_parms);

		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
			if (prli_rsp[index].logged_in) {
				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
					/* image pair established: record the FCP
					 * roles advertised and queue rport add */
					tgt->need_login = 0;
					tgt->ids.roles = 0;
					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
					tgt->add_rport = 1;
				} else
					ibmvfc_del_tgt(tgt);
			} else if (prli_rsp[index].retry)
				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
			else
				ibmvfc_del_tgt(tgt);
		} else
			ibmvfc_del_tgt(tgt);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* PLOGI_REQUIRED or a received LOGO means the port login is
		 * gone: restart from PLOGI rather than retrying PRLI */
		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (tgt->logo_rcvd)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3436*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_send_prli - Send a process login
 * @tgt:	ibmvfc target struct
 *
 * Issues a PRLI MAD for @tgt, completing in ibmvfc_tgt_prli_done().
 * Silently returns (leaving the target to be retried later) when the
 * discovery thread limit is already reached.  Holds a target reference
 * and a discovery thread slot for the lifetime of the command; both are
 * rolled back if the send fails.
 **/
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
{
	struct ibmvfc_process_login *prli;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	prli = &evt->iu.prli;
	memset(prli, 0, sizeof(*prli));
	prli->common.version = cpu_to_be32(1);
	prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
	prli->common.length = cpu_to_be16(sizeof(*prli));
	prli->scsi_id = cpu_to_be64(tgt->scsi_id);

	/* advertise initiator function with XFER_RDY disabled for reads */
	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);

	if (cls3_error)
		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* send failed: undo the slot, action and reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent process login\n");
}
3479*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
 * @evt:	ibmvfc event struct
 *
 * On success, records the target's node/port names and service parameters
 * and queues a PRLI; if the port WWPN changed since a previous login, a
 * host re-init is requested instead.  Failures are retried or the target
 * deleted based on the VIOS status/error.  Always releases the discovery
 * thread slot and the reference taken by ibmvfc_tgt_send_plogi(), frees
 * the event and wakes the worker thread.
 **/
static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Port Login succeeded\n");
		/* a different WWPN at the same SCSI ID means the fabric
		 * changed under us; rediscover rather than reuse */
		if (tgt->ids.port_name &&
		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
			vhost->reinit = 1;
			tgt_dbg(tgt, "Port re-init required\n");
			break;
		}
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		tgt->ids.port_id = tgt->scsi_id;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3537*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
 * @tgt:	ibmvfc target struct
 *
 * Issues a Port Login MAD for @tgt, completing in ibmvfc_tgt_plogi_done().
 * Silently returns when the discovery thread limit is reached.  Clears
 * tgt->logo_rcvd so a LOGO received after this point is attributed to the
 * new login attempt.  Holds a target reference and a discovery thread
 * slot; both are rolled back if the send fails.
 **/
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
{
	struct ibmvfc_port_login *plogi;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);
	tgt->logo_rcvd = 0;
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	plogi = &evt->iu.plogi;
	memset(plogi, 0, sizeof(*plogi));
	plogi->common.version = cpu_to_be32(1);
	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
	plogi->common.length = cpu_to_be16(sizeof(*plogi));
	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);

	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* send failed: undo the slot, action and reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent port login\n");
}
3573*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 * Whether the logout succeeded or failed (except for a driver failure,
 * where no further progress is possible), the target is moved on to the
 * PLOGI step.  Releases the discovery thread slot and the target
 * reference, frees the event and wakes the worker thread on all paths.
 **/
static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
	u32 status = be16_to_cpu(rsp->common.status);

	vhost->discovery_threads--;
	ibmvfc_free_event(evt);
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Implicit Logout succeeded\n");
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* driver-level failure: do not schedule PLOGI, just drop
		 * the reference and let the worker decide what to do */
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		wake_up(&vhost->work_wait_q);
		return;
	case IBMVFC_MAD_FAILED:
	default:
		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
		break;
	}

	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
3608*4882a593Smuzhiyun 
/**
 * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
 * @tgt:		ibmvfc target struct
 * @done:		completion routine to run when the MAD finishes
 *
 * Takes a reference on @tgt (the completion routine is expected to drop
 * it) and builds an Implicit Logout MAD addressed to the target's SCSI ID.
 *
 * Returns:
 *	Allocated and initialized ibmvfc_event struct
 **/
static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
								 void (*done) (struct ibmvfc_event *))
{
	struct ibmvfc_implicit_logout *mad;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	mad = &evt->iu.implicit_logout;
	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
	mad->common.length = cpu_to_be16(sizeof(*mad));
	mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
	return evt;
}
3635*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
 * @tgt:		ibmvfc target struct
 *
 * Sends an Implicit Logout MAD, completing in
 * ibmvfc_tgt_implicit_logout_done().  Silently returns when the discovery
 * thread limit is reached.  The target reference is taken inside
 * __ibmvfc_tgt_get_implicit_logout_evt(); the slot, action and reference
 * are all rolled back if the send fails.
 **/
static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	if (vhost->discovery_threads >= disc_threads)
		return;

	vhost->discovery_threads++;
	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
						   ibmvfc_tgt_implicit_logout_done);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Implicit Logout\n");
}
3661*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 * Marks the target for rport deletion based on the MAD status: plain delete
 * on success (or when the host is offline), delete-and-logout otherwise.
 **/
static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	/* Only mad->common.status is read below; presumably the common MAD
	 * header sits at the same offset in every xfer_iu format -- TODO
	 * confirm against the ibmvfc_iu union layout in ibmvfc.h */
	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
	u32 status = be16_to_cpu(mad->common.status);

	vhost->discovery_threads--;
	ibmvfc_free_event(evt);

	/*
	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
	 * driver in which case we need to free up all the targets. If we are
	 * not unloading, we will still go through a hard reset to get out of
	 * offline state, so there is no need to track the old targets in that
	 * case.
	 */
	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
	else
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);

	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
	/* Drop the reference taken when the logout event was allocated */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
3693*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
 * @tgt:		ibmvfc target struct
 *
 * Like ibmvfc_tgt_implicit_logout(), but the completion path deletes the
 * rport instead of re-initializing the target. If we are no longer logged
 * in to the fabric, the logout is pointless and the rport is deleted
 * immediately.
 **/
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Not logged in: nothing to log out of, just delete the rport */
	if (!vhost->logged_in) {
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		return;
	}

	/* Bound the number of concurrently outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	vhost->discovery_threads++;
	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
						   ibmvfc_tgt_implicit_logout_and_del_done);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo thread count, delete the rport anyway,
		 * and drop the reference taken for the event */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Implicit Logout\n");
}
3724*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_move_login_done - Completion handler for Move Login
 * @evt:	ibmvfc event struct
 *
 * On success, adopts the node/port names and service parameters returned by
 * the VIOS and proceeds to PRLI. CRQ errors are retried; other failures are
 * retried with logging.
 **/
static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
		/* Refresh identity from the response; port_id tracks the new
		 * SCSI ID assigned in ibmvfc_alloc_target() */
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		tgt->ids.port_id = tgt->scsi_id;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		/* Move login already logs us in; continue with PRLI */
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* Driver-level failure: no retry here */
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* Retry; log level is raised as retries accumulate */
		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);

		tgt_log(tgt, level,
			"Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
			tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
			status);
		break;
	}

	/* Drop the reference taken in ibmvfc_tgt_move_login() */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3772*4882a593Smuzhiyun 
3773*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_move_login - Initiate a move login for specified target
 * @tgt:		ibmvfc target struct
 *
 * Sends a Move Login MAD to transfer the login for a WWPN from its old
 * SCSI ID to the new one (set up in ibmvfc_alloc_target()). Move login
 * works even with I/O outstanding to the old SCSI ID.
 **/
static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_move_login *move;
	struct ibmvfc_event *evt;

	/* Bound the number of concurrently outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	/* Reference dropped by ibmvfc_tgt_move_login_done() or on send failure */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	move = &evt->iu.move_login;
	memset(move, 0, sizeof(*move));
	move->common.version = cpu_to_be32(1);
	move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
	move->common.length = cpu_to_be16(sizeof(*move));

	move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
	move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
	move->wwpn = cpu_to_be64(tgt->wwpn);
	move->node_name = cpu_to_be64(tgt->ids.node_name);

	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo thread count, schedule rport deletion,
		 * and drop the reference taken above */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
}
3812*4882a593Smuzhiyun 
3813*4882a593Smuzhiyun /**
3814*4882a593Smuzhiyun  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
3815*4882a593Smuzhiyun  * @mad:	ibmvfc passthru mad struct
3816*4882a593Smuzhiyun  * @tgt:	ibmvfc target struct
3817*4882a593Smuzhiyun  *
3818*4882a593Smuzhiyun  * Returns:
3819*4882a593Smuzhiyun  *	1 if PLOGI needed / 0 if PLOGI not needed
3820*4882a593Smuzhiyun  **/
ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad * mad,struct ibmvfc_target * tgt)3821*4882a593Smuzhiyun static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
3822*4882a593Smuzhiyun 				    struct ibmvfc_target *tgt)
3823*4882a593Smuzhiyun {
3824*4882a593Smuzhiyun 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
3825*4882a593Smuzhiyun 		return 1;
3826*4882a593Smuzhiyun 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
3827*4882a593Smuzhiyun 		return 1;
3828*4882a593Smuzhiyun 	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
3829*4882a593Smuzhiyun 		return 1;
3830*4882a593Smuzhiyun 	return 0;
3831*4882a593Smuzhiyun }
3832*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
 * @evt:	ibmvfc event struct
 *
 * Stops the ADISC cancel timer, then either keeps the target (identity
 * unchanged), re-logs in via PLOGI path (identity changed), or deletes it
 * on failure.
 **/
static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
	u32 status = be16_to_cpu(mad->common.status);
	u8 fc_reason, fc_explain;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	/* The ADISC completed, so the cancel timer is no longer needed */
	del_timer(&tgt->timer);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "ADISC succeeded\n");
		/* If the identity in the response no longer matches, the
		 * device behind this SCSI ID changed -- delete the target */
		if (ibmvfc_adisc_needs_plogi(mad, tgt))
			ibmvfc_del_tgt(tgt);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_FAILED:
	default:
		ibmvfc_del_tgt(tgt);
		/* ELS reject reason/explanation are bytes 1 and 2 of the
		 * second response word */
		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
			 ibmvfc_get_fc_type(fc_reason), fc_reason,
			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
		break;
	}

	/* Drop the reference taken in ibmvfc_tgt_adisc() */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3875*4882a593Smuzhiyun 
3876*4882a593Smuzhiyun /**
3877*4882a593Smuzhiyun  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
3878*4882a593Smuzhiyun  * @evt:		ibmvfc event struct
3879*4882a593Smuzhiyun  *
3880*4882a593Smuzhiyun  **/
ibmvfc_init_passthru(struct ibmvfc_event * evt)3881*4882a593Smuzhiyun static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
3882*4882a593Smuzhiyun {
3883*4882a593Smuzhiyun 	struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
3884*4882a593Smuzhiyun 
3885*4882a593Smuzhiyun 	memset(mad, 0, sizeof(*mad));
3886*4882a593Smuzhiyun 	mad->common.version = cpu_to_be32(1);
3887*4882a593Smuzhiyun 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
3888*4882a593Smuzhiyun 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
3889*4882a593Smuzhiyun 	mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3890*4882a593Smuzhiyun 		offsetof(struct ibmvfc_passthru_mad, iu));
3891*4882a593Smuzhiyun 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
3892*4882a593Smuzhiyun 	mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
3893*4882a593Smuzhiyun 	mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
3894*4882a593Smuzhiyun 	mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3895*4882a593Smuzhiyun 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3896*4882a593Smuzhiyun 		offsetof(struct ibmvfc_passthru_fc_iu, payload));
3897*4882a593Smuzhiyun 	mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
3898*4882a593Smuzhiyun 	mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3899*4882a593Smuzhiyun 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3900*4882a593Smuzhiyun 		offsetof(struct ibmvfc_passthru_fc_iu, response));
3901*4882a593Smuzhiyun 	mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
3902*4882a593Smuzhiyun }
3903*4882a593Smuzhiyun 
3904*4882a593Smuzhiyun /**
3905*4882a593Smuzhiyun  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
3906*4882a593Smuzhiyun  * @evt:		ibmvfc event struct
3907*4882a593Smuzhiyun  *
3908*4882a593Smuzhiyun  * Just cleanup this event struct. Everything else is handled by
3909*4882a593Smuzhiyun  * the ADISC completion handler. If the ADISC never actually comes
3910*4882a593Smuzhiyun  * back, we still have the timer running on the ADISC event struct
3911*4882a593Smuzhiyun  * which will fire and cause the CRQ to get reset.
3912*4882a593Smuzhiyun  *
3913*4882a593Smuzhiyun  **/
ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event * evt)3914*4882a593Smuzhiyun static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
3915*4882a593Smuzhiyun {
3916*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = evt->vhost;
3917*4882a593Smuzhiyun 	struct ibmvfc_target *tgt = evt->tgt;
3918*4882a593Smuzhiyun 
3919*4882a593Smuzhiyun 	tgt_dbg(tgt, "ADISC cancel complete\n");
3920*4882a593Smuzhiyun 	vhost->abort_threads--;
3921*4882a593Smuzhiyun 	ibmvfc_free_event(evt);
3922*4882a593Smuzhiyun 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3923*4882a593Smuzhiyun 	wake_up(&vhost->work_wait_q);
3924*4882a593Smuzhiyun }
3925*4882a593Smuzhiyun 
/**
 * ibmvfc_adisc_timeout - Handle an ADISC timeout
 * @tgt:		ibmvfc target struct
 *
 * If an ADISC times out, send a cancel. If the cancel times
 * out, reset the CRQ. When the ADISC comes back as cancelled,
 * log back into the target.
 **/
static void ibmvfc_adisc_timeout(struct timer_list *t)
{
	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	tgt_dbg(tgt, "ADISC timeout\n");
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Bail out if too many aborts are already in flight, or if the
	 * target/host state shows the ADISC is no longer outstanding */
	if (vhost->abort_threads >= disc_threads ||
	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
	    vhost->state != IBMVFC_INITIALIZING ||
	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}

	vhost->abort_threads++;
	/* Reference dropped by ibmvfc_tgt_adisc_cancel_done() or on failure */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);

	evt->tgt = tgt;
	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	tmf->common.version = cpu_to_be32(1);
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
	/* cancel_key matches the one set on the ADISC in ibmvfc_tgt_adisc() */
	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc) {
		/* Couldn't even send the cancel -- reset the CRQ to recover */
		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
		vhost->abort_threads--;
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		__ibmvfc_reset_host(vhost);
	} else
		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
3978*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
 * @tgt:		ibmvfc target struct
 *
 * When sending an ADISC we end up with two timers running. The
 * first timer is the timer in the ibmvfc target struct. If this
 * fires, we send a cancel to the target. The second timer is the
 * timer on the ibmvfc event for the ADISC, which is longer. If that
 * fires, it means the ADISC timed out and our attempt to cancel it
 * also failed, so we need to reset the CRQ.
 **/
static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
{
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Bound the number of concurrently outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	/* Reference dropped by ibmvfc_tgt_adisc_done() or on send failure */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;

	ibmvfc_init_passthru(evt);
	mad = &evt->iu.passthru;
	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
	/* cancel_key lets ibmvfc_adisc_timeout() cancel this command */
	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);

	/* Payload words 2, 4, and 6 carry our port name, node name, and
	 * SCSI ID -- the same fields ibmvfc_adisc_needs_plogi() checks in
	 * the response */
	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
	       sizeof(vhost->login_buf->resp.port_name));
	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
	       sizeof(vhost->login_buf->resp.node_name));
	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);

	/* Arm (or re-arm) the shorter cancel timer on the target */
	if (timer_pending(&tgt->timer))
		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
	else {
		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
		add_timer(&tgt->timer);
	}

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
		/* Send failed: undo thread count, timer, action, and reference */
		vhost->discovery_threads--;
		del_timer(&tgt->timer);
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent ADISC\n");
}
4034*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
 * @evt:	ibmvfc event struct
 *
 * On success, verifies the SCSI ID still matches and proceeds to ADISC;
 * a mismatch or an unregistered port name deletes the target. Retryable
 * errors re-run the query.
 **/
static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Query Target succeeded\n");
		/* SCSI ID changed for this WWPN: drop the target; otherwise
		 * confirm the login is still valid via ADISC */
		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
			ibmvfc_del_tgt(tgt);
		else
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* Port name no longer registered with the fabric: the device
		 * is gone, so delete instead of retrying */
		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
			ibmvfc_del_tgt(tgt);
		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
			status);
		break;
	}

	/* Drop the reference taken in ibmvfc_tgt_query_target() */
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4087*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
 * @tgt:	ibmvfc target struct
 *
 * Sends a Query Target MAD keyed on the target's WWPN so the completion
 * handler can check whether its SCSI ID is still current.
 **/
static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
{
	struct ibmvfc_query_tgt *query_tgt;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Bound the number of concurrently outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	/* Reference dropped by ibmvfc_tgt_query_target_done() or on failure */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	evt->tgt = tgt;
	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
	query_tgt = &evt->iu.query_tgt;
	memset(query_tgt, 0, sizeof(*query_tgt));
	query_tgt->common.version = cpu_to_be32(1);
	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo thread count, action, and reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Query Target\n");
}
4122*4882a593Smuzhiyun 
4123*4882a593Smuzhiyun /**
4124*4882a593Smuzhiyun  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4125*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
4126*4882a593Smuzhiyun  * @scsi_id:	SCSI ID to allocate target for
4127*4882a593Smuzhiyun  *
4128*4882a593Smuzhiyun  * Returns:
4129*4882a593Smuzhiyun  *	0 on success / other on failure
4130*4882a593Smuzhiyun  **/
ibmvfc_alloc_target(struct ibmvfc_host * vhost,struct ibmvfc_discover_targets_entry * target)4131*4882a593Smuzhiyun static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4132*4882a593Smuzhiyun 			       struct ibmvfc_discover_targets_entry *target)
4133*4882a593Smuzhiyun {
4134*4882a593Smuzhiyun 	struct ibmvfc_target *stgt = NULL;
4135*4882a593Smuzhiyun 	struct ibmvfc_target *wtgt = NULL;
4136*4882a593Smuzhiyun 	struct ibmvfc_target *tgt;
4137*4882a593Smuzhiyun 	unsigned long flags;
4138*4882a593Smuzhiyun 	u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4139*4882a593Smuzhiyun 	u64 wwpn = be64_to_cpu(target->wwpn);
4140*4882a593Smuzhiyun 
4141*4882a593Smuzhiyun 	/* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4142*4882a593Smuzhiyun 	spin_lock_irqsave(vhost->host->host_lock, flags);
4143*4882a593Smuzhiyun 	list_for_each_entry(tgt, &vhost->targets, queue) {
4144*4882a593Smuzhiyun 		if (tgt->wwpn == wwpn) {
4145*4882a593Smuzhiyun 			wtgt = tgt;
4146*4882a593Smuzhiyun 			break;
4147*4882a593Smuzhiyun 		}
4148*4882a593Smuzhiyun 	}
4149*4882a593Smuzhiyun 
4150*4882a593Smuzhiyun 	list_for_each_entry(tgt, &vhost->targets, queue) {
4151*4882a593Smuzhiyun 		if (tgt->scsi_id == scsi_id) {
4152*4882a593Smuzhiyun 			stgt = tgt;
4153*4882a593Smuzhiyun 			break;
4154*4882a593Smuzhiyun 		}
4155*4882a593Smuzhiyun 	}
4156*4882a593Smuzhiyun 
4157*4882a593Smuzhiyun 	if (wtgt && !stgt) {
4158*4882a593Smuzhiyun 		/*
4159*4882a593Smuzhiyun 		 * A WWPN target has moved and we still are tracking the old
4160*4882a593Smuzhiyun 		 * SCSI ID.  The only way we should be able to get here is if
4161*4882a593Smuzhiyun 		 * we attempted to send an implicit logout for the old SCSI ID
4162*4882a593Smuzhiyun 		 * and it failed for some reason, such as there being I/O
4163*4882a593Smuzhiyun 		 * pending to the target. In this case, we will have already
4164*4882a593Smuzhiyun 		 * deleted the rport from the FC transport so we do a move
4165*4882a593Smuzhiyun 		 * login, which works even with I/O pending, as it will cancel
4166*4882a593Smuzhiyun 		 * any active commands.
4167*4882a593Smuzhiyun 		 */
4168*4882a593Smuzhiyun 		if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4169*4882a593Smuzhiyun 			/*
4170*4882a593Smuzhiyun 			 * Do a move login here. The old target is no longer
4171*4882a593Smuzhiyun 			 * known to the transport layer We don't use the
4172*4882a593Smuzhiyun 			 * normal ibmvfc_set_tgt_action to set this, as we
4173*4882a593Smuzhiyun 			 * don't normally want to allow this state change.
4174*4882a593Smuzhiyun 			 */
4175*4882a593Smuzhiyun 			wtgt->old_scsi_id = wtgt->scsi_id;
4176*4882a593Smuzhiyun 			wtgt->scsi_id = scsi_id;
4177*4882a593Smuzhiyun 			wtgt->action = IBMVFC_TGT_ACTION_INIT;
4178*4882a593Smuzhiyun 			ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4179*4882a593Smuzhiyun 			goto unlock_out;
4180*4882a593Smuzhiyun 		} else {
4181*4882a593Smuzhiyun 			tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4182*4882a593Smuzhiyun 				wtgt->action, wtgt->rport);
4183*4882a593Smuzhiyun 		}
4184*4882a593Smuzhiyun 	} else if (stgt) {
4185*4882a593Smuzhiyun 		if (tgt->need_login)
4186*4882a593Smuzhiyun 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4187*4882a593Smuzhiyun 		goto unlock_out;
4188*4882a593Smuzhiyun 	}
4189*4882a593Smuzhiyun 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4190*4882a593Smuzhiyun 
4191*4882a593Smuzhiyun 	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4192*4882a593Smuzhiyun 	memset(tgt, 0, sizeof(*tgt));
4193*4882a593Smuzhiyun 	tgt->scsi_id = scsi_id;
4194*4882a593Smuzhiyun 	tgt->wwpn = wwpn;
4195*4882a593Smuzhiyun 	tgt->vhost = vhost;
4196*4882a593Smuzhiyun 	tgt->need_login = 1;
4197*4882a593Smuzhiyun 	timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4198*4882a593Smuzhiyun 	kref_init(&tgt->kref);
4199*4882a593Smuzhiyun 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4200*4882a593Smuzhiyun 	spin_lock_irqsave(vhost->host->host_lock, flags);
4201*4882a593Smuzhiyun 	tgt->cancel_key = vhost->task_set++;
4202*4882a593Smuzhiyun 	list_add_tail(&tgt->queue, &vhost->targets);
4203*4882a593Smuzhiyun 
4204*4882a593Smuzhiyun unlock_out:
4205*4882a593Smuzhiyun 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4206*4882a593Smuzhiyun 	return 0;
4207*4882a593Smuzhiyun }
4208*4882a593Smuzhiyun 
4209*4882a593Smuzhiyun /**
4210*4882a593Smuzhiyun  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4211*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
4212*4882a593Smuzhiyun  *
4213*4882a593Smuzhiyun  * Returns:
4214*4882a593Smuzhiyun  *	0 on success / other on failure
4215*4882a593Smuzhiyun  **/
ibmvfc_alloc_targets(struct ibmvfc_host * vhost)4216*4882a593Smuzhiyun static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4217*4882a593Smuzhiyun {
4218*4882a593Smuzhiyun 	int i, rc;
4219*4882a593Smuzhiyun 
4220*4882a593Smuzhiyun 	for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4221*4882a593Smuzhiyun 		rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4222*4882a593Smuzhiyun 
4223*4882a593Smuzhiyun 	return rc;
4224*4882a593Smuzhiyun }
4225*4882a593Smuzhiyun 
/**
 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
 * @evt:	ibmvfc event struct
 *
 * Parses the Discover Targets response and advances the host state
 * machine: on success the number of discovered targets is recorded and
 * target allocation is scheduled; on failure, host init is retried or
 * the link is marked dead. Always frees the event and wakes the work
 * thread.
 **/
static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
	u32 mad_status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
		/* num_written = number of entries the VIOS wrote to disc_buf */
		vhost->num_targets = be32_to_cpu(rsp->num_written);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
		break;
	case IBMVFC_MAD_FAILED:
		/* Retry host init; its return value raises the log level */
		level += ibmvfc_retry_host_init(vhost);
		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* Driver-level failure: nothing further to do here */
		break;
	default:
		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		break;
	}

	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4261*4882a593Smuzhiyun 
4262*4882a593Smuzhiyun /**
4263*4882a593Smuzhiyun  * ibmvfc_discover_targets - Send Discover Targets MAD
4264*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
4265*4882a593Smuzhiyun  *
4266*4882a593Smuzhiyun  **/
ibmvfc_discover_targets(struct ibmvfc_host * vhost)4267*4882a593Smuzhiyun static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4268*4882a593Smuzhiyun {
4269*4882a593Smuzhiyun 	struct ibmvfc_discover_targets *mad;
4270*4882a593Smuzhiyun 	struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4271*4882a593Smuzhiyun 
4272*4882a593Smuzhiyun 	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4273*4882a593Smuzhiyun 	mad = &evt->iu.discover_targets;
4274*4882a593Smuzhiyun 	memset(mad, 0, sizeof(*mad));
4275*4882a593Smuzhiyun 	mad->common.version = cpu_to_be32(1);
4276*4882a593Smuzhiyun 	mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4277*4882a593Smuzhiyun 	mad->common.length = cpu_to_be16(sizeof(*mad));
4278*4882a593Smuzhiyun 	mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
4279*4882a593Smuzhiyun 	mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
4280*4882a593Smuzhiyun 	mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
4281*4882a593Smuzhiyun 	mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
4282*4882a593Smuzhiyun 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4283*4882a593Smuzhiyun 
4284*4882a593Smuzhiyun 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4285*4882a593Smuzhiyun 		ibmvfc_dbg(vhost, "Sent discover targets\n");
4286*4882a593Smuzhiyun 	else
4287*4882a593Smuzhiyun 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4288*4882a593Smuzhiyun }
4289*4882a593Smuzhiyun 
4290*4882a593Smuzhiyun /**
4291*4882a593Smuzhiyun  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
4292*4882a593Smuzhiyun  * @evt:	ibmvfc event struct
4293*4882a593Smuzhiyun  *
4294*4882a593Smuzhiyun  **/
ibmvfc_npiv_login_done(struct ibmvfc_event * evt)4295*4882a593Smuzhiyun static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
4296*4882a593Smuzhiyun {
4297*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = evt->vhost;
4298*4882a593Smuzhiyun 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
4299*4882a593Smuzhiyun 	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
4300*4882a593Smuzhiyun 	unsigned int npiv_max_sectors;
4301*4882a593Smuzhiyun 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4302*4882a593Smuzhiyun 
4303*4882a593Smuzhiyun 	switch (mad_status) {
4304*4882a593Smuzhiyun 	case IBMVFC_MAD_SUCCESS:
4305*4882a593Smuzhiyun 		ibmvfc_free_event(evt);
4306*4882a593Smuzhiyun 		break;
4307*4882a593Smuzhiyun 	case IBMVFC_MAD_FAILED:
4308*4882a593Smuzhiyun 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4309*4882a593Smuzhiyun 			level += ibmvfc_retry_host_init(vhost);
4310*4882a593Smuzhiyun 		else
4311*4882a593Smuzhiyun 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4312*4882a593Smuzhiyun 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
4313*4882a593Smuzhiyun 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4314*4882a593Smuzhiyun 						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4315*4882a593Smuzhiyun 		ibmvfc_free_event(evt);
4316*4882a593Smuzhiyun 		return;
4317*4882a593Smuzhiyun 	case IBMVFC_MAD_CRQ_ERROR:
4318*4882a593Smuzhiyun 		ibmvfc_retry_host_init(vhost);
4319*4882a593Smuzhiyun 		fallthrough;
4320*4882a593Smuzhiyun 	case IBMVFC_MAD_DRIVER_FAILED:
4321*4882a593Smuzhiyun 		ibmvfc_free_event(evt);
4322*4882a593Smuzhiyun 		return;
4323*4882a593Smuzhiyun 	default:
4324*4882a593Smuzhiyun 		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
4325*4882a593Smuzhiyun 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4326*4882a593Smuzhiyun 		ibmvfc_free_event(evt);
4327*4882a593Smuzhiyun 		return;
4328*4882a593Smuzhiyun 	}
4329*4882a593Smuzhiyun 
4330*4882a593Smuzhiyun 	vhost->client_migrated = 0;
4331*4882a593Smuzhiyun 
4332*4882a593Smuzhiyun 	if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
4333*4882a593Smuzhiyun 		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
4334*4882a593Smuzhiyun 			rsp->flags);
4335*4882a593Smuzhiyun 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4336*4882a593Smuzhiyun 		wake_up(&vhost->work_wait_q);
4337*4882a593Smuzhiyun 		return;
4338*4882a593Smuzhiyun 	}
4339*4882a593Smuzhiyun 
4340*4882a593Smuzhiyun 	if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
4341*4882a593Smuzhiyun 		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
4342*4882a593Smuzhiyun 			rsp->max_cmds);
4343*4882a593Smuzhiyun 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4344*4882a593Smuzhiyun 		wake_up(&vhost->work_wait_q);
4345*4882a593Smuzhiyun 		return;
4346*4882a593Smuzhiyun 	}
4347*4882a593Smuzhiyun 
4348*4882a593Smuzhiyun 	vhost->logged_in = 1;
4349*4882a593Smuzhiyun 	npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
4350*4882a593Smuzhiyun 	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
4351*4882a593Smuzhiyun 		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
4352*4882a593Smuzhiyun 		 rsp->drc_name, npiv_max_sectors);
4353*4882a593Smuzhiyun 
4354*4882a593Smuzhiyun 	fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
4355*4882a593Smuzhiyun 	fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
4356*4882a593Smuzhiyun 	fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
4357*4882a593Smuzhiyun 	fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
4358*4882a593Smuzhiyun 	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
4359*4882a593Smuzhiyun 	fc_host_supported_classes(vhost->host) = 0;
4360*4882a593Smuzhiyun 	if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
4361*4882a593Smuzhiyun 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
4362*4882a593Smuzhiyun 	if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
4363*4882a593Smuzhiyun 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
4364*4882a593Smuzhiyun 	if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
4365*4882a593Smuzhiyun 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
4366*4882a593Smuzhiyun 	fc_host_maxframe_size(vhost->host) =
4367*4882a593Smuzhiyun 		be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
4368*4882a593Smuzhiyun 
4369*4882a593Smuzhiyun 	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
4370*4882a593Smuzhiyun 	vhost->host->max_sectors = npiv_max_sectors;
4371*4882a593Smuzhiyun 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4372*4882a593Smuzhiyun 	wake_up(&vhost->work_wait_q);
4373*4882a593Smuzhiyun }
4374*4882a593Smuzhiyun 
4375*4882a593Smuzhiyun /**
4376*4882a593Smuzhiyun  * ibmvfc_npiv_login - Sends NPIV login
4377*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
4378*4882a593Smuzhiyun  *
4379*4882a593Smuzhiyun  **/
ibmvfc_npiv_login(struct ibmvfc_host * vhost)4380*4882a593Smuzhiyun static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
4381*4882a593Smuzhiyun {
4382*4882a593Smuzhiyun 	struct ibmvfc_npiv_login_mad *mad;
4383*4882a593Smuzhiyun 	struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4384*4882a593Smuzhiyun 
4385*4882a593Smuzhiyun 	ibmvfc_gather_partition_info(vhost);
4386*4882a593Smuzhiyun 	ibmvfc_set_login_info(vhost);
4387*4882a593Smuzhiyun 	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
4388*4882a593Smuzhiyun 
4389*4882a593Smuzhiyun 	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
4390*4882a593Smuzhiyun 	mad = &evt->iu.npiv_login;
4391*4882a593Smuzhiyun 	memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
4392*4882a593Smuzhiyun 	mad->common.version = cpu_to_be32(1);
4393*4882a593Smuzhiyun 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
4394*4882a593Smuzhiyun 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
4395*4882a593Smuzhiyun 	mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
4396*4882a593Smuzhiyun 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
4397*4882a593Smuzhiyun 
4398*4882a593Smuzhiyun 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4399*4882a593Smuzhiyun 
4400*4882a593Smuzhiyun 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4401*4882a593Smuzhiyun 		ibmvfc_dbg(vhost, "Sent NPIV login\n");
4402*4882a593Smuzhiyun 	else
4403*4882a593Smuzhiyun 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4404*4882a593Smuzhiyun };
4405*4882a593Smuzhiyun 
/**
 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
 * @evt:	ibmvfc event struct
 *
 * On a successful logout with no commands outstanding and the host
 * still in LOGO_WAIT, re-initializes the host. In every other case
 * (failure, or state changed underneath us) the host is hard reset.
 **/
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);

	/* Status already captured above; event no longer needed */
	ibmvfc_free_event(evt);

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		if (list_empty(&vhost->sent) &&
		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
			ibmvfc_init_host(vhost);
			return;
		}
		break;
	case IBMVFC_MAD_FAILED:
	case IBMVFC_MAD_NOT_SUPPORTED:
	case IBMVFC_MAD_CRQ_ERROR:
	case IBMVFC_MAD_DRIVER_FAILED:
	default:
		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
		break;
	}

	/* Logout failed, or there is still outstanding work - start over */
	ibmvfc_hard_reset_host(vhost);
}
4437*4882a593Smuzhiyun 
4438*4882a593Smuzhiyun /**
4439*4882a593Smuzhiyun  * ibmvfc_npiv_logout - Issue an NPIV Logout
4440*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
4441*4882a593Smuzhiyun  *
4442*4882a593Smuzhiyun  **/
ibmvfc_npiv_logout(struct ibmvfc_host * vhost)4443*4882a593Smuzhiyun static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
4444*4882a593Smuzhiyun {
4445*4882a593Smuzhiyun 	struct ibmvfc_npiv_logout_mad *mad;
4446*4882a593Smuzhiyun 	struct ibmvfc_event *evt;
4447*4882a593Smuzhiyun 
4448*4882a593Smuzhiyun 	evt = ibmvfc_get_event(vhost);
4449*4882a593Smuzhiyun 	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
4450*4882a593Smuzhiyun 
4451*4882a593Smuzhiyun 	mad = &evt->iu.npiv_logout;
4452*4882a593Smuzhiyun 	memset(mad, 0, sizeof(*mad));
4453*4882a593Smuzhiyun 	mad->common.version = cpu_to_be32(1);
4454*4882a593Smuzhiyun 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
4455*4882a593Smuzhiyun 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
4456*4882a593Smuzhiyun 
4457*4882a593Smuzhiyun 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
4458*4882a593Smuzhiyun 
4459*4882a593Smuzhiyun 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4460*4882a593Smuzhiyun 		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
4461*4882a593Smuzhiyun 	else
4462*4882a593Smuzhiyun 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4463*4882a593Smuzhiyun }
4464*4882a593Smuzhiyun 
4465*4882a593Smuzhiyun /**
4466*4882a593Smuzhiyun  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
4467*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
4468*4882a593Smuzhiyun  *
4469*4882a593Smuzhiyun  * Returns:
4470*4882a593Smuzhiyun  *	1 if work to do / 0 if not
4471*4882a593Smuzhiyun  **/
ibmvfc_dev_init_to_do(struct ibmvfc_host * vhost)4472*4882a593Smuzhiyun static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
4473*4882a593Smuzhiyun {
4474*4882a593Smuzhiyun 	struct ibmvfc_target *tgt;
4475*4882a593Smuzhiyun 
4476*4882a593Smuzhiyun 	list_for_each_entry(tgt, &vhost->targets, queue) {
4477*4882a593Smuzhiyun 		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
4478*4882a593Smuzhiyun 		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4479*4882a593Smuzhiyun 			return 1;
4480*4882a593Smuzhiyun 	}
4481*4882a593Smuzhiyun 
4482*4882a593Smuzhiyun 	return 0;
4483*4882a593Smuzhiyun }
4484*4882a593Smuzhiyun 
4485*4882a593Smuzhiyun /**
4486*4882a593Smuzhiyun  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
4487*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
4488*4882a593Smuzhiyun  *
4489*4882a593Smuzhiyun  * Returns:
4490*4882a593Smuzhiyun  *	1 if work to do / 0 if not
4491*4882a593Smuzhiyun  **/
ibmvfc_dev_logo_to_do(struct ibmvfc_host * vhost)4492*4882a593Smuzhiyun static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
4493*4882a593Smuzhiyun {
4494*4882a593Smuzhiyun 	struct ibmvfc_target *tgt;
4495*4882a593Smuzhiyun 
4496*4882a593Smuzhiyun 	list_for_each_entry(tgt, &vhost->targets, queue) {
4497*4882a593Smuzhiyun 		if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
4498*4882a593Smuzhiyun 		    tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
4499*4882a593Smuzhiyun 			return 1;
4500*4882a593Smuzhiyun 	}
4501*4882a593Smuzhiyun 	return 0;
4502*4882a593Smuzhiyun }
4503*4882a593Smuzhiyun 
/**
 * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
 * @vhost:		ibmvfc host struct
 *
 * Lockless core of ibmvfc_work_to_do(); the caller is expected to hold
 * the host lock (see ibmvfc_work_to_do).
 *
 * Returns:
 *	1 if work to do / 0 if not
 **/
static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	/* Always wake the work thread so it can exit */
	if (kthread_should_stop())
		return 1;
	switch (vhost->action) {
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_INIT_WAIT:
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		/* Idle or waiting on a command response - nothing to do */
		return 0;
	case IBMVFC_HOST_ACTION_TGT_INIT:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
		/* Throttle: at most disc_threads concurrent discovery commands */
		if (vhost->discovery_threads == disc_threads)
			return 0;
		/* Work to do if any target's init step still needs starting */
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
				return 1;
		/* No init to start; keep waiting while any is still in flight */
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
				return 0;
		/* All target init complete - host-level step can proceed */
		return 1;
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
		if (vhost->discovery_threads == disc_threads)
			return 0;
		/* Same pattern as above, for the rport logout phase */
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
				return 1;
		list_for_each_entry(tgt, &vhost->targets, queue)
			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
				return 0;
		return 1;
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
	case IBMVFC_HOST_ACTION_QUERY:
	case IBMVFC_HOST_ACTION_RESET:
	case IBMVFC_HOST_ACTION_REENABLE:
	default:
		/* All remaining actions are handled directly by the work thread */
		break;
	}

	return 1;
}
4556*4882a593Smuzhiyun 
4557*4882a593Smuzhiyun /**
4558*4882a593Smuzhiyun  * ibmvfc_work_to_do - Is there task level work to do?
4559*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
4560*4882a593Smuzhiyun  *
4561*4882a593Smuzhiyun  * Returns:
4562*4882a593Smuzhiyun  *	1 if work to do / 0 if not
4563*4882a593Smuzhiyun  **/
ibmvfc_work_to_do(struct ibmvfc_host * vhost)4564*4882a593Smuzhiyun static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4565*4882a593Smuzhiyun {
4566*4882a593Smuzhiyun 	unsigned long flags;
4567*4882a593Smuzhiyun 	int rc;
4568*4882a593Smuzhiyun 
4569*4882a593Smuzhiyun 	spin_lock_irqsave(vhost->host->host_lock, flags);
4570*4882a593Smuzhiyun 	rc = __ibmvfc_work_to_do(vhost);
4571*4882a593Smuzhiyun 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4572*4882a593Smuzhiyun 	return rc;
4573*4882a593Smuzhiyun }
4574*4882a593Smuzhiyun 
4575*4882a593Smuzhiyun /**
4576*4882a593Smuzhiyun  * ibmvfc_log_ae - Log async events if necessary
4577*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
4578*4882a593Smuzhiyun  * @events:		events to log
4579*4882a593Smuzhiyun  *
4580*4882a593Smuzhiyun  **/
ibmvfc_log_ae(struct ibmvfc_host * vhost,int events)4581*4882a593Smuzhiyun static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
4582*4882a593Smuzhiyun {
4583*4882a593Smuzhiyun 	if (events & IBMVFC_AE_RSCN)
4584*4882a593Smuzhiyun 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
4585*4882a593Smuzhiyun 	if ((events & IBMVFC_AE_LINKDOWN) &&
4586*4882a593Smuzhiyun 	    vhost->state >= IBMVFC_HALTED)
4587*4882a593Smuzhiyun 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
4588*4882a593Smuzhiyun 	if ((events & IBMVFC_AE_LINKUP) &&
4589*4882a593Smuzhiyun 	    vhost->state == IBMVFC_INITIALIZING)
4590*4882a593Smuzhiyun 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
4591*4882a593Smuzhiyun }
4592*4882a593Smuzhiyun 
/**
 * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
 * @tgt:		ibmvfc target struct
 *
 * Registers the target with the FC transport via fc_remote_port_add()
 * (called unlocked), then re-checks the target's action under the host
 * lock, since the target state may have changed while the rport was
 * being added. The host lock is dropped before every
 * fc_remote_port_delete() call.
 **/
static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct fc_rport *rport;
	unsigned long flags;

	tgt_dbg(tgt, "Adding rport\n");
	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
		/* Target was marked for deletion while we were adding: undo the
		 * add, remove the target from the host list, and drop our ref. */
		tgt_dbg(tgt, "Deleting rport\n");
		list_del(&tgt->queue);
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		fc_remote_port_delete(rport);
		del_timer_sync(&tgt->timer);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		return;
	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
		/* I/O still outstanding: delete the rport but keep the target
		 * around until the logout completes. */
		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
		tgt->rport = NULL;
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		fc_remote_port_delete(rport);
		return;
	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
		/* Already torn down elsewhere - nothing more to do */
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}

	if (rport) {
		tgt_dbg(tgt, "rport add succeeded\n");
		tgt->rport = rport;
		/* Mirror the target's service parameters onto the rport */
		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
		rport->supported_classes = 0;
		tgt->target_id = rport->scsi_target_id;
		/* High bit of classN_parms[0] indicates the class is supported */
		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS1;
		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS2;
		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
			rport->supported_classes |= FC_COS_CLASS3;
		if (rport->rqst_q)
			blk_queue_max_segments(rport->rqst_q, 1);
	} else
		tgt_dbg(tgt, "rport add failed\n");
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
4647*4882a593Smuzhiyun 
4648*4882a593Smuzhiyun /**
4649*4882a593Smuzhiyun  * ibmvfc_do_work - Do task level work
4650*4882a593Smuzhiyun  * @vhost:		ibmvfc host struct
4651*4882a593Smuzhiyun  *
4652*4882a593Smuzhiyun  **/
ibmvfc_do_work(struct ibmvfc_host * vhost)4653*4882a593Smuzhiyun static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4654*4882a593Smuzhiyun {
4655*4882a593Smuzhiyun 	struct ibmvfc_target *tgt;
4656*4882a593Smuzhiyun 	unsigned long flags;
4657*4882a593Smuzhiyun 	struct fc_rport *rport;
4658*4882a593Smuzhiyun 	int rc;
4659*4882a593Smuzhiyun 
4660*4882a593Smuzhiyun 	ibmvfc_log_ae(vhost, vhost->events_to_log);
4661*4882a593Smuzhiyun 	spin_lock_irqsave(vhost->host->host_lock, flags);
4662*4882a593Smuzhiyun 	vhost->events_to_log = 0;
4663*4882a593Smuzhiyun 	switch (vhost->action) {
4664*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_NONE:
4665*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
4666*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_INIT_WAIT:
4667*4882a593Smuzhiyun 		break;
4668*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_RESET:
4669*4882a593Smuzhiyun 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4670*4882a593Smuzhiyun 		rc = ibmvfc_reset_crq(vhost);
4671*4882a593Smuzhiyun 
4672*4882a593Smuzhiyun 		spin_lock_irqsave(vhost->host->host_lock, flags);
4673*4882a593Smuzhiyun 		if (!rc || rc == H_CLOSED)
4674*4882a593Smuzhiyun 			vio_enable_interrupts(to_vio_dev(vhost->dev));
4675*4882a593Smuzhiyun 		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
4676*4882a593Smuzhiyun 			/*
4677*4882a593Smuzhiyun 			 * The only action we could have changed to would have
4678*4882a593Smuzhiyun 			 * been reenable, in which case, we skip the rest of
4679*4882a593Smuzhiyun 			 * this path and wait until we've done the re-enable
4680*4882a593Smuzhiyun 			 * before sending the crq init.
4681*4882a593Smuzhiyun 			 */
4682*4882a593Smuzhiyun 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4683*4882a593Smuzhiyun 
4684*4882a593Smuzhiyun 			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
4685*4882a593Smuzhiyun 			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
4686*4882a593Smuzhiyun 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4687*4882a593Smuzhiyun 				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
4688*4882a593Smuzhiyun 			}
4689*4882a593Smuzhiyun 		}
4690*4882a593Smuzhiyun 		break;
4691*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_REENABLE:
4692*4882a593Smuzhiyun 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4693*4882a593Smuzhiyun 		rc = ibmvfc_reenable_crq_queue(vhost);
4694*4882a593Smuzhiyun 
4695*4882a593Smuzhiyun 		spin_lock_irqsave(vhost->host->host_lock, flags);
4696*4882a593Smuzhiyun 		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
4697*4882a593Smuzhiyun 			/*
4698*4882a593Smuzhiyun 			 * The only action we could have changed to would have
4699*4882a593Smuzhiyun 			 * been reset, in which case, we skip the rest of this
4700*4882a593Smuzhiyun 			 * path and wait until we've done the reset before
4701*4882a593Smuzhiyun 			 * sending the crq init.
4702*4882a593Smuzhiyun 			 */
4703*4882a593Smuzhiyun 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4704*4882a593Smuzhiyun 			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
4705*4882a593Smuzhiyun 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4706*4882a593Smuzhiyun 				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
4707*4882a593Smuzhiyun 			}
4708*4882a593Smuzhiyun 		}
4709*4882a593Smuzhiyun 		break;
4710*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_LOGO:
4711*4882a593Smuzhiyun 		vhost->job_step(vhost);
4712*4882a593Smuzhiyun 		break;
4713*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_INIT:
4714*4882a593Smuzhiyun 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
4715*4882a593Smuzhiyun 		if (vhost->delay_init) {
4716*4882a593Smuzhiyun 			vhost->delay_init = 0;
4717*4882a593Smuzhiyun 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
4718*4882a593Smuzhiyun 			ssleep(15);
4719*4882a593Smuzhiyun 			return;
4720*4882a593Smuzhiyun 		} else
4721*4882a593Smuzhiyun 			vhost->job_step(vhost);
4722*4882a593Smuzhiyun 		break;
4723*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_QUERY:
4724*4882a593Smuzhiyun 		list_for_each_entry(tgt, &vhost->targets, queue)
4725*4882a593Smuzhiyun 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
4726*4882a593Smuzhiyun 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
4727*4882a593Smuzhiyun 		break;
4728*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
4729*4882a593Smuzhiyun 		list_for_each_entry(tgt, &vhost->targets, queue) {
4730*4882a593Smuzhiyun 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4731*4882a593Smuzhiyun 				tgt->job_step(tgt);
4732*4882a593Smuzhiyun 				break;
4733*4882a593Smuzhiyun 			}
4734*4882a593Smuzhiyun 		}
4735*4882a593Smuzhiyun 
4736*4882a593Smuzhiyun 		if (!ibmvfc_dev_init_to_do(vhost))
4737*4882a593Smuzhiyun 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
4738*4882a593Smuzhiyun 		break;
4739*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_TGT_DEL:
4740*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4741*4882a593Smuzhiyun 		list_for_each_entry(tgt, &vhost->targets, queue) {
4742*4882a593Smuzhiyun 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
4743*4882a593Smuzhiyun 				tgt->job_step(tgt);
4744*4882a593Smuzhiyun 				break;
4745*4882a593Smuzhiyun 			}
4746*4882a593Smuzhiyun 		}
4747*4882a593Smuzhiyun 
4748*4882a593Smuzhiyun 		if (ibmvfc_dev_logo_to_do(vhost)) {
4749*4882a593Smuzhiyun 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
4750*4882a593Smuzhiyun 			return;
4751*4882a593Smuzhiyun 		}
4752*4882a593Smuzhiyun 
4753*4882a593Smuzhiyun 		list_for_each_entry(tgt, &vhost->targets, queue) {
4754*4882a593Smuzhiyun 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
4755*4882a593Smuzhiyun 				tgt_dbg(tgt, "Deleting rport\n");
4756*4882a593Smuzhiyun 				rport = tgt->rport;
4757*4882a593Smuzhiyun 				tgt->rport = NULL;
4758*4882a593Smuzhiyun 				list_del(&tgt->queue);
4759*4882a593Smuzhiyun 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
4760*4882a593Smuzhiyun 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
4761*4882a593Smuzhiyun 				if (rport)
4762*4882a593Smuzhiyun 					fc_remote_port_delete(rport);
4763*4882a593Smuzhiyun 				del_timer_sync(&tgt->timer);
4764*4882a593Smuzhiyun 				kref_put(&tgt->kref, ibmvfc_release_tgt);
4765*4882a593Smuzhiyun 				return;
4766*4882a593Smuzhiyun 			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
4767*4882a593Smuzhiyun 				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
4768*4882a593Smuzhiyun 				rport = tgt->rport;
4769*4882a593Smuzhiyun 				tgt->rport = NULL;
4770*4882a593Smuzhiyun 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
4771*4882a593Smuzhiyun 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
4772*4882a593Smuzhiyun 				if (rport)
4773*4882a593Smuzhiyun 					fc_remote_port_delete(rport);
4774*4882a593Smuzhiyun 				return;
4775*4882a593Smuzhiyun 			}
4776*4882a593Smuzhiyun 		}
4777*4882a593Smuzhiyun 
4778*4882a593Smuzhiyun 		if (vhost->state == IBMVFC_INITIALIZING) {
4779*4882a593Smuzhiyun 			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
4780*4882a593Smuzhiyun 				if (vhost->reinit) {
4781*4882a593Smuzhiyun 					vhost->reinit = 0;
4782*4882a593Smuzhiyun 					scsi_block_requests(vhost->host);
4783*4882a593Smuzhiyun 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4784*4882a593Smuzhiyun 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
4785*4882a593Smuzhiyun 				} else {
4786*4882a593Smuzhiyun 					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
4787*4882a593Smuzhiyun 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4788*4882a593Smuzhiyun 					wake_up(&vhost->init_wait_q);
4789*4882a593Smuzhiyun 					schedule_work(&vhost->rport_add_work_q);
4790*4882a593Smuzhiyun 					vhost->init_retries = 0;
4791*4882a593Smuzhiyun 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
4792*4882a593Smuzhiyun 					scsi_unblock_requests(vhost->host);
4793*4882a593Smuzhiyun 				}
4794*4882a593Smuzhiyun 
4795*4882a593Smuzhiyun 				return;
4796*4882a593Smuzhiyun 			} else {
4797*4882a593Smuzhiyun 				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
4798*4882a593Smuzhiyun 				vhost->job_step = ibmvfc_discover_targets;
4799*4882a593Smuzhiyun 			}
4800*4882a593Smuzhiyun 		} else {
4801*4882a593Smuzhiyun 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4802*4882a593Smuzhiyun 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
4803*4882a593Smuzhiyun 			scsi_unblock_requests(vhost->host);
4804*4882a593Smuzhiyun 			wake_up(&vhost->init_wait_q);
4805*4882a593Smuzhiyun 			return;
4806*4882a593Smuzhiyun 		}
4807*4882a593Smuzhiyun 		break;
4808*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
4809*4882a593Smuzhiyun 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
4810*4882a593Smuzhiyun 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4811*4882a593Smuzhiyun 		ibmvfc_alloc_targets(vhost);
4812*4882a593Smuzhiyun 		spin_lock_irqsave(vhost->host->host_lock, flags);
4813*4882a593Smuzhiyun 		break;
4814*4882a593Smuzhiyun 	case IBMVFC_HOST_ACTION_TGT_INIT:
4815*4882a593Smuzhiyun 		list_for_each_entry(tgt, &vhost->targets, queue) {
4816*4882a593Smuzhiyun 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4817*4882a593Smuzhiyun 				tgt->job_step(tgt);
4818*4882a593Smuzhiyun 				break;
4819*4882a593Smuzhiyun 			}
4820*4882a593Smuzhiyun 		}
4821*4882a593Smuzhiyun 
4822*4882a593Smuzhiyun 		if (!ibmvfc_dev_init_to_do(vhost))
4823*4882a593Smuzhiyun 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
4824*4882a593Smuzhiyun 		break;
4825*4882a593Smuzhiyun 	default:
4826*4882a593Smuzhiyun 		break;
4827*4882a593Smuzhiyun 	}
4828*4882a593Smuzhiyun 
4829*4882a593Smuzhiyun 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4830*4882a593Smuzhiyun }
4831*4882a593Smuzhiyun 
4832*4882a593Smuzhiyun /**
4833*4882a593Smuzhiyun  * ibmvfc_work - Do task level work
4834*4882a593Smuzhiyun  * @data:		ibmvfc host struct
4835*4882a593Smuzhiyun  *
4836*4882a593Smuzhiyun  * Returns:
4837*4882a593Smuzhiyun  *	zero
4838*4882a593Smuzhiyun  **/
ibmvfc_work(void * data)4839*4882a593Smuzhiyun static int ibmvfc_work(void *data)
4840*4882a593Smuzhiyun {
4841*4882a593Smuzhiyun 	struct ibmvfc_host *vhost = data;
4842*4882a593Smuzhiyun 	int rc;
4843*4882a593Smuzhiyun 
4844*4882a593Smuzhiyun 	set_user_nice(current, MIN_NICE);
4845*4882a593Smuzhiyun 
4846*4882a593Smuzhiyun 	while (1) {
4847*4882a593Smuzhiyun 		rc = wait_event_interruptible(vhost->work_wait_q,
4848*4882a593Smuzhiyun 					      ibmvfc_work_to_do(vhost));
4849*4882a593Smuzhiyun 
4850*4882a593Smuzhiyun 		BUG_ON(rc);
4851*4882a593Smuzhiyun 
4852*4882a593Smuzhiyun 		if (kthread_should_stop())
4853*4882a593Smuzhiyun 			break;
4854*4882a593Smuzhiyun 
4855*4882a593Smuzhiyun 		ibmvfc_do_work(vhost);
4856*4882a593Smuzhiyun 	}
4857*4882a593Smuzhiyun 
4858*4882a593Smuzhiyun 	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
4859*4882a593Smuzhiyun 	return 0;
4860*4882a593Smuzhiyun }
4861*4882a593Smuzhiyun 
4862*4882a593Smuzhiyun /**
4863*4882a593Smuzhiyun  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
4864*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
4865*4882a593Smuzhiyun  *
4866*4882a593Smuzhiyun  * Allocates a page for messages, maps it for dma, and registers
4867*4882a593Smuzhiyun  * the crq with the hypervisor.
4868*4882a593Smuzhiyun  *
4869*4882a593Smuzhiyun  * Return value:
4870*4882a593Smuzhiyun  *	zero on success / other on failure
4871*4882a593Smuzhiyun  **/
static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
{
	int rc, retrc = -ENOMEM;
	struct device *dev = vhost->dev;
	struct vio_dev *vdev = to_vio_dev(dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	ENTER;
	/* The CRQ message area is one zeroed page shared with the hypervisor */
	crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs,
					PAGE_SIZE, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	/* Register the mapped page as our CRQ with the hypervisor */
	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
					crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		retrc = rc = ibmvfc_reset_crq(vhost);

	/*
	 * H_CLOSED just means the partner adapter isn't up yet; warn and
	 * continue setup.  Any other non-zero rc is fatal.
	 */
	if (rc == H_CLOSED)
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);

	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
		goto req_irq_failed;
	}

	if ((rc = vio_enable_interrupts(vdev))) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	LEAVE;
	return retrc;

	/* Unwind in reverse order of setup */
req_irq_failed:
	tasklet_kill(&vhost->tasklet);
	/* H_FREE_CRQ can report busy; keep retrying until it completes */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}
4935*4882a593Smuzhiyun 
4936*4882a593Smuzhiyun /**
4937*4882a593Smuzhiyun  * ibmvfc_free_mem - Free memory for vhost
4938*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
4939*4882a593Smuzhiyun  *
4940*4882a593Smuzhiyun  * Return value:
4941*4882a593Smuzhiyun  * 	none
4942*4882a593Smuzhiyun  **/
static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
{
	struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;

	ENTER;
	/* Release in the reverse order of ibmvfc_alloc_mem() */
	mempool_destroy(vhost->tgt_pool);
	kfree(vhost->trace);
	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
			  vhost->disc_buf_dma);
	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
			  vhost->login_buf, vhost->login_buf_dma);
	dma_pool_destroy(vhost->sg_pool);
	/* Unmap the async CRQ page before freeing it */
	dma_unmap_single(vhost->dev, async_q->msg_token,
			 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)async_q->msgs);
	LEAVE;
}
4960*4882a593Smuzhiyun 
4961*4882a593Smuzhiyun /**
4962*4882a593Smuzhiyun  * ibmvfc_alloc_mem - Allocate memory for vhost
4963*4882a593Smuzhiyun  * @vhost:	ibmvfc host struct
4964*4882a593Smuzhiyun  *
4965*4882a593Smuzhiyun  * Return value:
4966*4882a593Smuzhiyun  * 	0 on success / non-zero on failure
4967*4882a593Smuzhiyun  **/
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
{
	struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
	struct device *dev = vhost->dev;

	ENTER;
	/* Async event CRQ: one zeroed page, DMA-mapped for the hypervisor */
	async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
	if (!async_q->msgs) {
		dev_err(dev, "Couldn't allocate async queue.\n");
		goto nomem;
	}

	async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
	async_q->msg_token = dma_map_single(dev, async_q->msgs,
					    async_q->size * sizeof(*async_q->msgs),
					    DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, async_q->msg_token)) {
		dev_err(dev, "Failed to map async queue\n");
		goto free_async_crq;
	}

	/* DMA pool for per-command scatter/gather descriptor lists */
	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
					 SG_ALL * sizeof(struct srp_direct_buf),
					 sizeof(struct srp_direct_buf), 0);

	if (!vhost->sg_pool) {
		dev_err(dev, "Failed to allocate sg pool\n");
		goto unmap_async_crq;
	}

	/* Coherent buffer for the NPIV login response */
	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
					      &vhost->login_buf_dma, GFP_KERNEL);

	if (!vhost->login_buf) {
		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
		goto free_sg_pool;
	}

	/* Discovery buffer sized for the max_targets module parameter */
	vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
					     &vhost->disc_buf_dma, GFP_KERNEL);

	if (!vhost->disc_buf) {
		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
		goto free_login_buffer;
	}

	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);

	if (!vhost->trace)
		goto free_disc_buffer;

	/* Mempool guarantees forward progress when allocating target structs */
	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
						      sizeof(struct ibmvfc_target));

	if (!vhost->tgt_pool) {
		dev_err(dev, "Couldn't allocate target memory pool\n");
		goto free_trace;
	}

	LEAVE;
	return 0;

	/* Error unwind: release everything acquired so far, newest first */
free_trace:
	kfree(vhost->trace);
free_disc_buffer:
	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
			  vhost->disc_buf_dma);
free_login_buffer:
	dma_free_coherent(dev, sizeof(*vhost->login_buf),
			  vhost->login_buf, vhost->login_buf_dma);
free_sg_pool:
	dma_pool_destroy(vhost->sg_pool);
unmap_async_crq:
	dma_unmap_single(dev, async_q->msg_token,
			 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
free_async_crq:
	free_page((unsigned long)async_q->msgs);
nomem:
	LEAVE;
	return -ENOMEM;
}
5052*4882a593Smuzhiyun 
5053*4882a593Smuzhiyun /**
5054*4882a593Smuzhiyun  * ibmvfc_rport_add_thread - Worker thread for rport adds
5055*4882a593Smuzhiyun  * @work:	work struct
5056*4882a593Smuzhiyun  *
5057*4882a593Smuzhiyun  **/
static void ibmvfc_rport_add_thread(struct work_struct *work)
{
	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
						 rport_add_work_q);
	struct ibmvfc_target *tgt;
	struct fc_rport *rport;
	unsigned long flags;
	int did_work;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	do {
		did_work = 0;
		if (vhost->state != IBMVFC_ACTIVE)
			break;

		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->add_rport) {
				did_work = 1;
				tgt->add_rport = 0;
				/* Hold the target across the unlocked section */
				kref_get(&tgt->kref);
				rport = tgt->rport;
				if (!rport) {
					/*
					 * The host lock must be dropped before calling
					 * into the FC transport layer below.
					 */
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					ibmvfc_tgt_add_rport(tgt);
				} else if (get_device(&rport->dev)) {
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					tgt_dbg(tgt, "Setting rport roles\n");
					fc_remote_port_rolechg(rport, tgt->ids.roles);
					put_device(&rport->dev);
				} else {
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
				}

				kref_put(&tgt->kref, ibmvfc_release_tgt);
				spin_lock_irqsave(vhost->host->host_lock, flags);
				/*
				 * The list may have changed while unlocked, so
				 * restart the scan from the head via the outer loop.
				 */
				break;
			}
		}
	} while(did_work);

	/* Only mark the scan complete if the host stayed active throughout */
	if (vhost->state == IBMVFC_ACTIVE)
		vhost->scan_complete = 1;
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
}
5104*4882a593Smuzhiyun 
5105*4882a593Smuzhiyun /**
5106*4882a593Smuzhiyun  * ibmvfc_probe - Adapter hot plug add entry point
5107*4882a593Smuzhiyun  * @vdev:	vio device struct
5108*4882a593Smuzhiyun  * @id:	vio device id struct
5109*4882a593Smuzhiyun  *
5110*4882a593Smuzhiyun  * Return value:
5111*4882a593Smuzhiyun  * 	0 on success / non-zero on failure
5112*4882a593Smuzhiyun  **/
static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvfc_host *vhost;
	struct Scsi_Host *shost;
	struct device *dev = &vdev->dev;
	int rc = -ENOMEM;

	ENTER;
	/* The vhost private data is embedded in the Scsi_Host allocation */
	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
	if (!shost) {
		dev_err(dev, "Couldn't allocate host data\n");
		goto out;
	}

	/* Host limits come from the module parameters and driver constants */
	shost->transportt = ibmvfc_transport_template;
	shost->can_queue = max_requests;
	shost->max_lun = max_lun;
	shost->max_id = max_targets;
	shost->max_sectors = IBMVFC_MAX_SECTORS;
	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
	shost->unique_id = shost->host_no;

	vhost = shost_priv(shost);
	INIT_LIST_HEAD(&vhost->sent);
	INIT_LIST_HEAD(&vhost->free);
	INIT_LIST_HEAD(&vhost->targets);
	sprintf(vhost->name, IBMVFC_NAME);
	vhost->host = shost;
	vhost->dev = dev;
	vhost->partition_number = -1;
	vhost->log_level = log_level;
	vhost->task_set = 1;
	/* Real partition name is learned later from the NPIV login response */
	strcpy(vhost->partition_name, "UNKNOWN");
	init_waitqueue_head(&vhost->work_wait_q);
	init_waitqueue_head(&vhost->init_wait_q);
	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
	mutex_init(&vhost->passthru_mutex);

	if ((rc = ibmvfc_alloc_mem(vhost)))
		goto free_scsi_host;

	/* Worker kthread that services ibmvfc_do_work() */
	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
					 shost->host_no);

	if (IS_ERR(vhost->work_thread)) {
		dev_err(dev, "Couldn't create kernel thread: %ld\n",
			PTR_ERR(vhost->work_thread));
		rc = PTR_ERR(vhost->work_thread);
		goto free_host_mem;
	}

	if ((rc = ibmvfc_init_crq(vhost))) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto kill_kthread;
	}

	if ((rc = ibmvfc_init_event_pool(vhost))) {
		dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
		goto release_crq;
	}

	if ((rc = scsi_add_host(shost, dev)))
		goto release_event_pool;

	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;

	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
					   &ibmvfc_trace_attr))) {
		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
		goto remove_shost;
	}

	/* Limit BSG passthru requests to a single segment */
	if (shost_to_fc_host(shost)->rqst_q)
		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
	dev_set_drvdata(dev, vhost);
	/* Add to the global list of ibmvfc hosts */
	spin_lock(&ibmvfc_driver_lock);
	list_add_tail(&vhost->queue, &ibmvfc_head);
	spin_unlock(&ibmvfc_driver_lock);

	/* Kick off CRQ initialization with the partner, then scan */
	ibmvfc_send_crq_init(vhost);
	scsi_scan_host(shost);
	return 0;

	/* Error unwind: reverse order of the setup steps above */
remove_shost:
	scsi_remove_host(shost);
release_event_pool:
	ibmvfc_free_event_pool(vhost);
release_crq:
	ibmvfc_release_crq_queue(vhost);
kill_kthread:
	kthread_stop(vhost->work_thread);
free_host_mem:
	ibmvfc_free_mem(vhost);
free_scsi_host:
	scsi_host_put(shost);
out:
	LEAVE;
	return rc;
}
5212*4882a593Smuzhiyun 
5213*4882a593Smuzhiyun /**
5214*4882a593Smuzhiyun  * ibmvfc_remove - Adapter hot plug remove entry point
5215*4882a593Smuzhiyun  * @vdev:	vio device struct
5216*4882a593Smuzhiyun  *
5217*4882a593Smuzhiyun  * Return value:
5218*4882a593Smuzhiyun  * 	0
5219*4882a593Smuzhiyun  **/
static int ibmvfc_remove(struct vio_dev *vdev)
{
	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
	unsigned long flags;

	ENTER;
	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);

	/* Take the link down so no new work is started */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	/* Quiesce before tearing down the CRQ and worker thread */
	ibmvfc_wait_while_resetting(vhost);
	ibmvfc_release_crq_queue(vhost);
	kthread_stop(vhost->work_thread);
	fc_remove_host(vhost->host);
	scsi_remove_host(vhost->host);

	/* Fail any requests still outstanding at this point */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_purge_requests(vhost, DID_ERROR);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	ibmvfc_free_event_pool(vhost);

	ibmvfc_free_mem(vhost);
	/* Drop out of the global ibmvfc host list */
	spin_lock(&ibmvfc_driver_lock);
	list_del(&vhost->queue);
	spin_unlock(&ibmvfc_driver_lock);
	scsi_host_put(vhost->host);
	LEAVE;
	return 0;
}
5251*4882a593Smuzhiyun 
5252*4882a593Smuzhiyun /**
5253*4882a593Smuzhiyun  * ibmvfc_resume - Resume from suspend
5254*4882a593Smuzhiyun  * @dev:	device struct
5255*4882a593Smuzhiyun  *
5256*4882a593Smuzhiyun  * We may have lost an interrupt across suspend/resume, so kick the
5257*4882a593Smuzhiyun  * interrupt handler
5258*4882a593Smuzhiyun  *
5259*4882a593Smuzhiyun  */
static int ibmvfc_resume(struct device *dev)
{
	unsigned long flags;
	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
	struct vio_dev *vdev = to_vio_dev(dev);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	/*
	 * Disable interrupts and schedule the tasklet, which will
	 * process any queue entries whose interrupt was lost across
	 * suspend/resume (and re-enable interrupts when done).
	 */
	vio_disable_interrupts(vdev);
	tasklet_schedule(&vhost->tasklet);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return 0;
}
5272*4882a593Smuzhiyun 
5273*4882a593Smuzhiyun /**
5274*4882a593Smuzhiyun  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
5275*4882a593Smuzhiyun  * @vdev:	vio device struct
5276*4882a593Smuzhiyun  *
5277*4882a593Smuzhiyun  * Return value:
5278*4882a593Smuzhiyun  *	Number of bytes the driver will need to DMA map at the same time in
5279*4882a593Smuzhiyun  *	order to perform well.
5280*4882a593Smuzhiyun  */
ibmvfc_get_desired_dma(struct vio_dev * vdev)5281*4882a593Smuzhiyun static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
5282*4882a593Smuzhiyun {
5283*4882a593Smuzhiyun 	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
5284*4882a593Smuzhiyun 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
5285*4882a593Smuzhiyun }
5286*4882a593Smuzhiyun 
5287*4882a593Smuzhiyun static const struct vio_device_id ibmvfc_device_table[] = {
5288*4882a593Smuzhiyun 	{"fcp", "IBM,vfc-client"},
5289*4882a593Smuzhiyun 	{ "", "" }
5290*4882a593Smuzhiyun };
5291*4882a593Smuzhiyun MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
5292*4882a593Smuzhiyun 
5293*4882a593Smuzhiyun static const struct dev_pm_ops ibmvfc_pm_ops = {
5294*4882a593Smuzhiyun 	.resume = ibmvfc_resume
5295*4882a593Smuzhiyun };
5296*4882a593Smuzhiyun 
5297*4882a593Smuzhiyun static struct vio_driver ibmvfc_driver = {
5298*4882a593Smuzhiyun 	.id_table = ibmvfc_device_table,
5299*4882a593Smuzhiyun 	.probe = ibmvfc_probe,
5300*4882a593Smuzhiyun 	.remove = ibmvfc_remove,
5301*4882a593Smuzhiyun 	.get_desired_dma = ibmvfc_get_desired_dma,
5302*4882a593Smuzhiyun 	.name = IBMVFC_NAME,
5303*4882a593Smuzhiyun 	.pm = &ibmvfc_pm_ops,
5304*4882a593Smuzhiyun };
5305*4882a593Smuzhiyun 
5306*4882a593Smuzhiyun static struct fc_function_template ibmvfc_transport_functions = {
5307*4882a593Smuzhiyun 	.show_host_fabric_name = 1,
5308*4882a593Smuzhiyun 	.show_host_node_name = 1,
5309*4882a593Smuzhiyun 	.show_host_port_name = 1,
5310*4882a593Smuzhiyun 	.show_host_supported_classes = 1,
5311*4882a593Smuzhiyun 	.show_host_port_type = 1,
5312*4882a593Smuzhiyun 	.show_host_port_id = 1,
5313*4882a593Smuzhiyun 	.show_host_maxframe_size = 1,
5314*4882a593Smuzhiyun 
5315*4882a593Smuzhiyun 	.get_host_port_state = ibmvfc_get_host_port_state,
5316*4882a593Smuzhiyun 	.show_host_port_state = 1,
5317*4882a593Smuzhiyun 
5318*4882a593Smuzhiyun 	.get_host_speed = ibmvfc_get_host_speed,
5319*4882a593Smuzhiyun 	.show_host_speed = 1,
5320*4882a593Smuzhiyun 
5321*4882a593Smuzhiyun 	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
5322*4882a593Smuzhiyun 	.terminate_rport_io = ibmvfc_terminate_rport_io,
5323*4882a593Smuzhiyun 
5324*4882a593Smuzhiyun 	.show_rport_maxframe_size = 1,
5325*4882a593Smuzhiyun 	.show_rport_supported_classes = 1,
5326*4882a593Smuzhiyun 
5327*4882a593Smuzhiyun 	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
5328*4882a593Smuzhiyun 	.show_rport_dev_loss_tmo = 1,
5329*4882a593Smuzhiyun 
5330*4882a593Smuzhiyun 	.get_starget_node_name = ibmvfc_get_starget_node_name,
5331*4882a593Smuzhiyun 	.show_starget_node_name = 1,
5332*4882a593Smuzhiyun 
5333*4882a593Smuzhiyun 	.get_starget_port_name = ibmvfc_get_starget_port_name,
5334*4882a593Smuzhiyun 	.show_starget_port_name = 1,
5335*4882a593Smuzhiyun 
5336*4882a593Smuzhiyun 	.get_starget_port_id = ibmvfc_get_starget_port_id,
5337*4882a593Smuzhiyun 	.show_starget_port_id = 1,
5338*4882a593Smuzhiyun 
5339*4882a593Smuzhiyun 	.bsg_request = ibmvfc_bsg_request,
5340*4882a593Smuzhiyun 	.bsg_timeout = ibmvfc_bsg_timeout,
5341*4882a593Smuzhiyun };
5342*4882a593Smuzhiyun 
5343*4882a593Smuzhiyun /**
5344*4882a593Smuzhiyun  * ibmvfc_module_init - Initialize the ibmvfc module
5345*4882a593Smuzhiyun  *
5346*4882a593Smuzhiyun  * Return value:
5347*4882a593Smuzhiyun  * 	0 on success / other on failure
5348*4882a593Smuzhiyun  **/
ibmvfc_module_init(void)5349*4882a593Smuzhiyun static int __init ibmvfc_module_init(void)
5350*4882a593Smuzhiyun {
5351*4882a593Smuzhiyun 	int rc;
5352*4882a593Smuzhiyun 
5353*4882a593Smuzhiyun 	if (!firmware_has_feature(FW_FEATURE_VIO))
5354*4882a593Smuzhiyun 		return -ENODEV;
5355*4882a593Smuzhiyun 
5356*4882a593Smuzhiyun 	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
5357*4882a593Smuzhiyun 	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
5358*4882a593Smuzhiyun 
5359*4882a593Smuzhiyun 	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
5360*4882a593Smuzhiyun 	if (!ibmvfc_transport_template)
5361*4882a593Smuzhiyun 		return -ENOMEM;
5362*4882a593Smuzhiyun 
5363*4882a593Smuzhiyun 	rc = vio_register_driver(&ibmvfc_driver);
5364*4882a593Smuzhiyun 	if (rc)
5365*4882a593Smuzhiyun 		fc_release_transport(ibmvfc_transport_template);
5366*4882a593Smuzhiyun 	return rc;
5367*4882a593Smuzhiyun }
5368*4882a593Smuzhiyun 
5369*4882a593Smuzhiyun /**
5370*4882a593Smuzhiyun  * ibmvfc_module_exit - Teardown the ibmvfc module
5371*4882a593Smuzhiyun  *
5372*4882a593Smuzhiyun  * Return value:
5373*4882a593Smuzhiyun  * 	nothing
5374*4882a593Smuzhiyun  **/
static void __exit ibmvfc_module_exit(void)
{
	/* Unregister from the VIO bus before releasing the transport class */
	vio_unregister_driver(&ibmvfc_driver);
	fc_release_transport(ibmvfc_transport_template);
}

module_init(ibmvfc_module_init);
module_exit(ibmvfc_module_exit);
5383