/* kernel/drivers/infiniband/ulp/srp/ib_srp.c */
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

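/*
 * All of the parameters above are declared with mode 0444 and hence are
 * read-only through sysfs; they are normally set when the module is
 * loaded, e.g. "modprobe ib_srp cmd_sg_entries=32" (illustrative values
 * only). The timeout parameters below are registered through
 * module_param_cb() with S_IWUSR set and can also be updated at runtime
 * via /sys/module/ib_srp/parameters/.
 */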
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static bool srp_use_imm_data = true;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC(use_imm_data,
		 "Whether or not to request permission to use immediate data during SRP login.");

static unsigned int srp_max_imm_data = 8 * 1024;
module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one,
	.rename = srp_rename_dev
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d\n", tmo);
	else
		return sprintf(buffer, "off\n");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
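/*
 * A minimal sketch of the parsing contract that srp_tmo_set() relies on:
 * srp_parse_tmo() (provided by the SRP transport class) accepts either a
 * number of seconds or the string "off", and "off" is stored as a negative
 * value -- the same convention srp_tmo_get() uses when it prints "off" for
 * tmo < 0. Illustrative only; the real helper lives in scsi_transport_srp.
 *
 *	static int example_parse_tmo(int *tmo, const char *buf)
 *	{
 *		if (sysfs_streq(buf, "off")) {
 *			*tmo = -1;
 *			return 0;
 *		}
 *		return kstrtoint(buf, 0, tmo);
 *	}
 */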

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
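/*
 * Typical IU life cycle (illustrative sketch, not a code path in this
 * driver): allocate a DMA-mapped buffer for one direction, hand iu->dma
 * and iu->size to a work request, and release the IU once the matching
 * completion has been consumed.
 *
 *	struct srp_iu *iu;
 *
 *	iu = srp_alloc_iu(host, 4096, GFP_KERNEL, DMA_TO_DEVICE);
 *	if (!iu)
 *		return -ENOMEM;
 *	... post iu->dma as the sge of a send work request ...
 *	srp_free_iu(host, iu);
 */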

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_ib_qp(struct srp_target_port *target,
			  struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->ib_cm.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_ib_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->ib_cm.cm_id)
		ib_destroy_cm_id(ch->ib_cm.cm_id);
	ch->ib_cm.cm_id = new_cm_id;
	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
			    target->srp_host->port))
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
	ch->ib_cm.path.sgid = target->sgid;
	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
	ch->ib_cm.path.pkey = target->ib_cm.pkey;
	ch->ib_cm.path.service_id = target->ib_cm.service_id;

	return 0;
}

static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct rdma_cm_id *new_cm_id;
	int ret;

	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		new_cm_id = NULL;
		goto out;
	}

	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
				&target->rdma_cm.src.sa : NULL,
				&target->rdma_cm.dst.sa,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
		goto out;
	}
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		goto out;

	ret = ch->status;
	if (ret) {
		pr_err("Resolving address %pISpsc failed (%d)\n",
		       &target->rdma_cm.dst, ret);
		goto out;
	}

	swap(ch->rdma_cm.cm_id, new_cm_id);

out:
	if (new_cm_id)
		rdma_destroy_id(new_cm_id);

	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
		srp_new_ib_cm_id(ch);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;
	enum ib_mr_type mr_type;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
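/*
 * Illustrative get/register/put pattern for the FR pool (sketch only; the
 * real users are the memory mapping helpers further down this file):
 *
 *	struct srp_fr_desc *d = srp_fr_pool_get(ch->fr_pool);
 *
 *	if (!d)
 *		return -ENOMEM;	(pool exhausted; the caller must back off)
 *	... build an IB_WR_REG_MR work request that registers d->mr ...
 *	... once the mapping is torn down, queue an invalidation of
 *	    d->mr->rkey and then return the descriptor: ...
 *	srp_fr_pool_put(ch->fr_pool, &d, 1);
 */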

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it.  This avoids that the receive
 * completion handler can access the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(ch->qp);
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	const struct ib_device_attr *attr = &dev->dev->attrs;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);

	if (target->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
		qp = ch->rdma_cm.cm_id->qp;
	} else {
		qp = ib_create_qp(dev->pd, init_attr);
		if (!IS_ERR(qp)) {
			ret = srp_init_ib_qp(target, qp);
			if (ret)
				ib_destroy_qp(qp);
		} else {
			ret = PTR_ERR(qp);
		}
	}
	if (ret) {
		pr_err("QP creation failed for dev %s: %d\n",
		       dev_name(&dev->dev->dev), ret);
		goto err_send_cq;
	}

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	if (target->using_rdma_cm)
		rdma_destroy_qp(ch->rdma_cm.cm_id);
	else
		ib_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
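/*
 * Note on the ordering in srp_create_ch_ib(): the new CQs, QP and FR pool
 * are created first, the old objects (if any) are destroyed only once
 * everything has been allocated, and the ch->* pointers are switched over
 * last. A failed (re)connect attempt therefore leaves the previous channel
 * resources intact.
 */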

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (target->using_rdma_cm) {
		if (ch->rdma_cm.cm_id) {
			rdma_destroy_id(ch->rdma_cm.cm_id);
			ch->rdma_cm.cm_id = NULL;
		}
	} else {
		if (ch->ib_cm.cm_id) {
			ib_destroy_cm_id(ch->ib_cm.cm_id);
			ch->ib_cm.cm_id = NULL;
		}
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->ib_cm.path = *pathrec;
	complete(&ch->done);
}

static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->ib_cm.path.numb_path = 1;

	init_completion(&ch->done);

	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->ib_cm.path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->ib_cm.path_query);
	if (ch->ib_cm.path_query_id < 0)
		return ch->ib_cm.path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
			     be16_to_cpu(target->ib_cm.pkey),
			     be64_to_cpu(target->ib_cm.service_id));

	return ch->status;
}

static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	init_completion(&ch->done);

	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
	if (ret)
		return ret;

	wait_for_completion_interruptible(&ch->done);

	if (ch->status != 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path resolution failed\n");

	return ch->status;
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
		srp_ib_lookup_path(ch);
}

static u8 srp_get_subnet_timeout(struct srp_host *host)
{
	struct ib_port_attr attr;
	int ret;
	u8 subnet_timeout = 18;

	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
	if (ret == 0)
		subnet_timeout = attr.subnet_timeout;

	if (unlikely(subnet_timeout < 15))
		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
			dev_name(&host->srp_dev->dev->dev), subnet_timeout);

	return subnet_timeout;
}
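/*
 * srp_send_req() below programs the local and remote CM response timeouts
 * as subnet_timeout + 2. In the CM encoding a timeout field value t
 * corresponds to 4.096 usec * 2^t, so the default subnet timeout of 18
 * yields 4.096 usec * 2^20, i.e. roughly 4.3 seconds per CM retry.
 */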

static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct rdma_conn_param	  rdma_param;
		struct srp_login_req_rdma rdma_req;
		struct ib_cm_req_param	  ib_param;
		struct srp_login_req	  ib_req;
	} *req = NULL;
	char *ipi, *tpi;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->ib_param.flow_control = 1;
	req->ib_param.retry_count = target->tl_retry_count;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->ib_param.responder_resources = 4;
	req->ib_param.rnr_retry_count = 7;
	req->ib_param.max_cm_retries = 15;

	req->ib_req.opcode = SRP_LOGIN_REQ;
	req->ib_req.tag = 0;
	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
	req->ib_req.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
				 SRP_MULTICHAN_SINGLE);
	if (srp_use_imm_data) {
		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
	}

	if (target->using_rdma_cm) {
		req->rdma_param.flow_control = req->ib_param.flow_control;
		req->rdma_param.responder_resources =
			req->ib_param.responder_resources;
		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
		req->rdma_param.retry_count = req->ib_param.retry_count;
		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
		req->rdma_param.private_data = &req->rdma_req;
		req->rdma_param.private_data_len = sizeof(req->rdma_req);

		req->rdma_req.opcode = req->ib_req.opcode;
		req->rdma_req.tag = req->ib_req.tag;
		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
		req->rdma_req.req_flags	= req->ib_req.req_flags;
		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;

		ipi = req->rdma_req.initiator_port_id;
		tpi = req->rdma_req.target_port_id;
	} else {
		u8 subnet_timeout;

		subnet_timeout = srp_get_subnet_timeout(target->srp_host);

		req->ib_param.primary_path = &ch->ib_cm.path;
		req->ib_param.alternate_path = NULL;
		req->ib_param.service_id = target->ib_cm.service_id;
		get_random_bytes(&req->ib_param.starting_psn, 4);
		req->ib_param.starting_psn &= 0xffffff;
		req->ib_param.qp_num = ch->qp->qp_num;
		req->ib_param.qp_type = ch->qp->qp_type;
		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.private_data = &req->ib_req;
		req->ib_param.private_data_len = sizeof(req->ib_req);

		ipi = req->ib_req.initiator_port_id;
		tpi = req->ib_req.target_port_id;
	}

	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(ipi,     &target->sgid.global.interface_id, 8);
		memcpy(ipi + 8, &target->initiator_ext, 8);
		memcpy(tpi,     &target->ioc_guid, 8);
		memcpy(tpi + 8, &target->id_ext, 8);
	} else {
		memcpy(ipi,     &target->initiator_ext, 8);
		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
		memcpy(tpi,     &target->id_ext, 8);
		memcpy(tpi + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(ipi, 0, 8);
		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	if (target->using_rdma_cm)
		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
	else
		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);

	kfree(req);

	return status;
}
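/*
 * Summary of the two 16-byte port identifier layouts built above, each
 * half being 8 bytes; only the order of the halves differs:
 *
 *	SRP draft rev. 16a:	initiator = initiator_ext | sgid GUID
 *				target    = id_ext        | ioc_guid
 *	obsolete rev. 10:	initiator = sgid GUID     | initiator_ext
 *				target    = ioc_guid      | id_ext
 */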

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i, ret;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		ret = 0;
		if (target->using_rdma_cm) {
			if (ch->rdma_cm.cm_id)
				rdma_disconnect(ch->rdma_cm.cm_id);
		} else {
			if (ch->ib_cm.cm_id)
				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
						      NULL, 0);
		}
		if (ret < 0) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (srp_dev->use_fast_reg) {
			req->fr_list = kmalloc_array(target->mr_per_cmd,
						sizeof(void *), GFP_KERNEL);
			if (!req->fr_list)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}
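/*
 * Note: on allocation failure srp_alloc_req_data() returns -ENOMEM and
 * leaves the partially populated req_ring in place; the caller is expected
 * to clean up through srp_free_req_data(), which copes with a zeroed ring
 * since kfree(NULL) is a no-op and unmapping is skipped while
 * indirect_dma_addr is still 0.
 */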
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun /**
1036*4882a593Smuzhiyun  * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1037*4882a593Smuzhiyun  * @shost: SCSI host whose attributes to remove from sysfs.
1038*4882a593Smuzhiyun  *
1039*4882a593Smuzhiyun  * Note: Any attributes defined in the host template and that did not exist
1040*4882a593Smuzhiyun  * before invocation of this function will be ignored.
1041*4882a593Smuzhiyun  */
srp_del_scsi_host_attr(struct Scsi_Host * shost)1042*4882a593Smuzhiyun static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
1043*4882a593Smuzhiyun {
1044*4882a593Smuzhiyun 	struct device_attribute **attr;
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun 	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
1047*4882a593Smuzhiyun 		device_remove_file(&shost->shost_dev, *attr);
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun 
srp_remove_target(struct srp_target_port * target)1050*4882a593Smuzhiyun static void srp_remove_target(struct srp_target_port *target)
1051*4882a593Smuzhiyun {
1052*4882a593Smuzhiyun 	struct srp_rdma_ch *ch;
1053*4882a593Smuzhiyun 	int i;
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun 	srp_del_scsi_host_attr(target->scsi_host);
1058*4882a593Smuzhiyun 	srp_rport_get(target->rport);
1059*4882a593Smuzhiyun 	srp_remove_host(target->scsi_host);
1060*4882a593Smuzhiyun 	scsi_remove_host(target->scsi_host);
1061*4882a593Smuzhiyun 	srp_stop_rport_timers(target->rport);
1062*4882a593Smuzhiyun 	srp_disconnect_target(target);
1063*4882a593Smuzhiyun 	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
1064*4882a593Smuzhiyun 	for (i = 0; i < target->ch_count; i++) {
1065*4882a593Smuzhiyun 		ch = &target->ch[i];
1066*4882a593Smuzhiyun 		srp_free_ch_ib(target, ch);
1067*4882a593Smuzhiyun 	}
1068*4882a593Smuzhiyun 	cancel_work_sync(&target->tl_err_work);
1069*4882a593Smuzhiyun 	srp_rport_put(target->rport);
1070*4882a593Smuzhiyun 	for (i = 0; i < target->ch_count; i++) {
1071*4882a593Smuzhiyun 		ch = &target->ch[i];
1072*4882a593Smuzhiyun 		srp_free_req_data(target, ch);
1073*4882a593Smuzhiyun 	}
1074*4882a593Smuzhiyun 	kfree(target->ch);
1075*4882a593Smuzhiyun 	target->ch = NULL;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	spin_lock(&target->srp_host->target_lock);
1078*4882a593Smuzhiyun 	list_del(&target->list);
1079*4882a593Smuzhiyun 	spin_unlock(&target->srp_host->target_lock);
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun 	scsi_host_put(target->scsi_host);
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun 
srp_remove_work(struct work_struct * work)1084*4882a593Smuzhiyun static void srp_remove_work(struct work_struct *work)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun 	struct srp_target_port *target =
1087*4882a593Smuzhiyun 		container_of(work, struct srp_target_port, remove_work);
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	srp_remove_target(target);
1092*4882a593Smuzhiyun }
1093*4882a593Smuzhiyun 
srp_rport_delete(struct srp_rport * rport)1094*4882a593Smuzhiyun static void srp_rport_delete(struct srp_rport *rport)
1095*4882a593Smuzhiyun {
1096*4882a593Smuzhiyun 	struct srp_target_port *target = rport->lld_data;
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	srp_queue_remove_work(target);
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun /**
1102*4882a593Smuzhiyun  * srp_connected_ch() - number of connected channels
1103*4882a593Smuzhiyun  * @target: SRP target port.
1104*4882a593Smuzhiyun  */
srp_connected_ch(struct srp_target_port * target)1105*4882a593Smuzhiyun static int srp_connected_ch(struct srp_target_port *target)
1106*4882a593Smuzhiyun {
1107*4882a593Smuzhiyun 	int i, c = 0;
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	for (i = 0; i < target->ch_count; i++)
1110*4882a593Smuzhiyun 		c += target->ch[i].connected;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	return c;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
1116*4882a593Smuzhiyun 			  bool multich)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1119*4882a593Smuzhiyun 	int ret;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	ret = srp_lookup_path(ch);
1124*4882a593Smuzhiyun 	if (ret)
1125*4882a593Smuzhiyun 		goto out;
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 	while (1) {
1128*4882a593Smuzhiyun 		init_completion(&ch->done);
1129*4882a593Smuzhiyun 		ret = srp_send_req(ch, max_iu_len, multich);
1130*4882a593Smuzhiyun 		if (ret)
1131*4882a593Smuzhiyun 			goto out;
1132*4882a593Smuzhiyun 		ret = wait_for_completion_interruptible(&ch->done);
1133*4882a593Smuzhiyun 		if (ret < 0)
1134*4882a593Smuzhiyun 			goto out;
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 		/*
1137*4882a593Smuzhiyun 		 * The CM event handling code will set status to
1138*4882a593Smuzhiyun 		 * SRP_PORT_REDIRECT if we get a port redirect REJ
1139*4882a593Smuzhiyun 		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1140*4882a593Smuzhiyun 		 * redirect REJ back.
1141*4882a593Smuzhiyun 		 */
1142*4882a593Smuzhiyun 		ret = ch->status;
1143*4882a593Smuzhiyun 		switch (ret) {
1144*4882a593Smuzhiyun 		case 0:
1145*4882a593Smuzhiyun 			ch->connected = true;
1146*4882a593Smuzhiyun 			goto out;
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 		case SRP_PORT_REDIRECT:
1149*4882a593Smuzhiyun 			ret = srp_lookup_path(ch);
1150*4882a593Smuzhiyun 			if (ret)
1151*4882a593Smuzhiyun 				goto out;
1152*4882a593Smuzhiyun 			break;
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 		case SRP_DLID_REDIRECT:
1155*4882a593Smuzhiyun 			break;
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 		case SRP_STALE_CONN:
1158*4882a593Smuzhiyun 			shost_printk(KERN_ERR, target->scsi_host, PFX
1159*4882a593Smuzhiyun 				     "giving up on stale connection\n");
1160*4882a593Smuzhiyun 			ret = -ECONNRESET;
1161*4882a593Smuzhiyun 			goto out;
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 		default:
1164*4882a593Smuzhiyun 			goto out;
1165*4882a593Smuzhiyun 		}
1166*4882a593Smuzhiyun 	}
1167*4882a593Smuzhiyun 
1168*4882a593Smuzhiyun out:
1169*4882a593Smuzhiyun 	return ret <= 0 ? ret : -ENODEV;
1170*4882a593Smuzhiyun }
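/*
 * A note on the return convention above (illustrative, restating the code
 * in srp_connect_ch() itself): 0 means the channel is connected and a
 * negative errno is passed through unchanged. Any positive SRP status
 * copied from ch->status that falls through the switch (e.g. an unhandled
 * REJ reason) is mapped to -ENODEV by "ret <= 0 ? ret : -ENODEV", so
 * callers only ever observe zero or a negative value.
 */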
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
1173*4882a593Smuzhiyun {
1174*4882a593Smuzhiyun 	srp_handle_qp_err(cq, wc, "INV RKEY");
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
1178*4882a593Smuzhiyun 		u32 rkey)
1179*4882a593Smuzhiyun {
1180*4882a593Smuzhiyun 	struct ib_send_wr wr = {
1181*4882a593Smuzhiyun 		.opcode		    = IB_WR_LOCAL_INV,
1182*4882a593Smuzhiyun 		.next		    = NULL,
1183*4882a593Smuzhiyun 		.num_sge	    = 0,
1184*4882a593Smuzhiyun 		.send_flags	    = 0,
1185*4882a593Smuzhiyun 		.ex.invalidate_rkey = rkey,
1186*4882a593Smuzhiyun 	};
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	wr.wr_cqe = &req->reg_cqe;
1189*4882a593Smuzhiyun 	req->reg_cqe.done = srp_inv_rkey_err_done;
1190*4882a593Smuzhiyun 	return ib_post_send(ch->qp, &wr, NULL);
1191*4882a593Smuzhiyun }
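/*
 * Note (an inference from the code above, not new behavior): the LOCAL_INV
 * work request is posted with send_flags == 0, i.e. unsignaled. Assuming
 * the QP was created with sq_sig_type == IB_SIGNAL_REQ_WR, as
 * srp_create_ch_ib() does, req->reg_cqe.done (srp_inv_rkey_err_done) is
 * therefore only invoked for error completions; a successful invalidate
 * generates no completion entry.
 */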
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun static void srp_unmap_data(struct scsi_cmnd *scmnd,
1194*4882a593Smuzhiyun 			   struct srp_rdma_ch *ch,
1195*4882a593Smuzhiyun 			   struct srp_request *req)
1196*4882a593Smuzhiyun {
1197*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1198*4882a593Smuzhiyun 	struct srp_device *dev = target->srp_host->srp_dev;
1199*4882a593Smuzhiyun 	struct ib_device *ibdev = dev->dev;
1200*4882a593Smuzhiyun 	int i, res;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	if (!scsi_sglist(scmnd) ||
1203*4882a593Smuzhiyun 	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1204*4882a593Smuzhiyun 	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
1205*4882a593Smuzhiyun 		return;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	if (dev->use_fast_reg) {
1208*4882a593Smuzhiyun 		struct srp_fr_desc **pfr;
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1211*4882a593Smuzhiyun 			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
1212*4882a593Smuzhiyun 			if (res < 0) {
1213*4882a593Smuzhiyun 				shost_printk(KERN_ERR, target->scsi_host, PFX
1214*4882a593Smuzhiyun 				  "Queueing INV WR for rkey %#x failed (%d)\n",
1215*4882a593Smuzhiyun 				  (*pfr)->mr->rkey, res);
1216*4882a593Smuzhiyun 				queue_work(system_long_wq,
1217*4882a593Smuzhiyun 					   &target->tl_err_work);
1218*4882a593Smuzhiyun 			}
1219*4882a593Smuzhiyun 		}
1220*4882a593Smuzhiyun 		if (req->nmdesc)
1221*4882a593Smuzhiyun 			srp_fr_pool_put(ch->fr_pool, req->fr_list,
1222*4882a593Smuzhiyun 					req->nmdesc);
1223*4882a593Smuzhiyun 	}
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1226*4882a593Smuzhiyun 			scmnd->sc_data_direction);
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun /**
1230*4882a593Smuzhiyun  * srp_claim_req() - Take ownership of the scmnd associated with a request.
1231*4882a593Smuzhiyun  * @ch: SRP RDMA channel.
1232*4882a593Smuzhiyun  * @req: SRP request.
1233*4882a593Smuzhiyun  * @sdev: If not NULL, only take ownership for this SCSI device.
1234*4882a593Smuzhiyun  * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1235*4882a593Smuzhiyun  *         ownership of @req->scmnd if it equals @scmnd.
1236*4882a593Smuzhiyun  *
1237*4882a593Smuzhiyun  * Return value:
1238*4882a593Smuzhiyun  * Either NULL or a pointer to the SCSI command the caller became owner of.
1239*4882a593Smuzhiyun  */
1240*4882a593Smuzhiyun static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1241*4882a593Smuzhiyun 				       struct srp_request *req,
1242*4882a593Smuzhiyun 				       struct scsi_device *sdev,
1243*4882a593Smuzhiyun 				       struct scsi_cmnd *scmnd)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun 	unsigned long flags;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	spin_lock_irqsave(&ch->lock, flags);
1248*4882a593Smuzhiyun 	if (req->scmnd &&
1249*4882a593Smuzhiyun 	    (!sdev || req->scmnd->device == sdev) &&
1250*4882a593Smuzhiyun 	    (!scmnd || req->scmnd == scmnd)) {
1251*4882a593Smuzhiyun 		scmnd = req->scmnd;
1252*4882a593Smuzhiyun 		req->scmnd = NULL;
1253*4882a593Smuzhiyun 	} else {
1254*4882a593Smuzhiyun 		scmnd = NULL;
1255*4882a593Smuzhiyun 	}
1256*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ch->lock, flags);
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	return scmnd;
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun /**
1262*4882a593Smuzhiyun  * srp_free_req() - Unmap data and adjust ch->req_lim.
1263*4882a593Smuzhiyun  * @ch:     SRP RDMA channel.
1264*4882a593Smuzhiyun  * @req:    Request to be freed.
1265*4882a593Smuzhiyun  * @scmnd:  SCSI command associated with @req.
1266*4882a593Smuzhiyun  * @req_lim_delta: Amount to be added to @ch->req_lim.
1267*4882a593Smuzhiyun  */
1268*4882a593Smuzhiyun static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1269*4882a593Smuzhiyun 			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1270*4882a593Smuzhiyun {
1271*4882a593Smuzhiyun 	unsigned long flags;
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	srp_unmap_data(scmnd, ch, req);
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	spin_lock_irqsave(&ch->lock, flags);
1276*4882a593Smuzhiyun 	ch->req_lim += req_lim_delta;
1277*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ch->lock, flags);
1278*4882a593Smuzhiyun }
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1281*4882a593Smuzhiyun 			   struct scsi_device *sdev, int result)
1282*4882a593Smuzhiyun {
1283*4882a593Smuzhiyun 	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	if (scmnd) {
1286*4882a593Smuzhiyun 		srp_free_req(ch, req, scmnd, 0);
1287*4882a593Smuzhiyun 		scmnd->result = result;
1288*4882a593Smuzhiyun 		scmnd->scsi_done(scmnd);
1289*4882a593Smuzhiyun 	}
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun static void srp_terminate_io(struct srp_rport *rport)
1293*4882a593Smuzhiyun {
1294*4882a593Smuzhiyun 	struct srp_target_port *target = rport->lld_data;
1295*4882a593Smuzhiyun 	struct srp_rdma_ch *ch;
1296*4882a593Smuzhiyun 	int i, j;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	for (i = 0; i < target->ch_count; i++) {
1299*4882a593Smuzhiyun 		ch = &target->ch[i];
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 		for (j = 0; j < target->req_ring_size; ++j) {
1302*4882a593Smuzhiyun 			struct srp_request *req = &ch->req_ring[j];
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 			srp_finish_req(ch, req, NULL,
1305*4882a593Smuzhiyun 				       DID_TRANSPORT_FAILFAST << 16);
1306*4882a593Smuzhiyun 		}
1307*4882a593Smuzhiyun 	}
1308*4882a593Smuzhiyun }
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun /* Calculate the maximum initiator-to-target information unit length. */
1311*4882a593Smuzhiyun static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
1312*4882a593Smuzhiyun 				  uint32_t max_it_iu_size)
1313*4882a593Smuzhiyun {
1314*4882a593Smuzhiyun 	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
1315*4882a593Smuzhiyun 		sizeof(struct srp_indirect_buf) +
1316*4882a593Smuzhiyun 		cmd_sg_cnt * sizeof(struct srp_direct_buf);
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	if (use_imm_data)
1319*4882a593Smuzhiyun 		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
1320*4882a593Smuzhiyun 				 srp_max_imm_data);
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	if (max_it_iu_size)
1323*4882a593Smuzhiyun 		max_iu_len = min(max_iu_len, max_it_iu_size);
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	pr_debug("max_iu_len = %d\n", max_iu_len);
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	return max_iu_len;
1328*4882a593Smuzhiyun }
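/*
 * Worked example (header sizes are illustrative; the authoritative values
 * live in <scsi/srp.h>): assuming sizeof(struct srp_cmd) == 48,
 * SRP_MAX_ADD_CDB_LEN == 16, sizeof(struct srp_indirect_buf) == 20 and
 * sizeof(struct srp_direct_buf) == 16, a caller passing cmd_sg_cnt == 12
 * starts from
 *
 *	max_iu_len = 48 + 16 + 20 + 12 * 16 = 276 bytes
 *
 * which is then raised to cover immediate data if use_imm_data is set and
 * finally clamped to max_it_iu_size when the target advertised one.
 */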
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun /*
1331*4882a593Smuzhiyun  * It is up to the caller to ensure that srp_rport_reconnect() calls are
1332*4882a593Smuzhiyun  * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1333*4882a593Smuzhiyun  * srp_reset_device() or srp_reset_host() calls will occur while this function
1334*4882a593Smuzhiyun  * is in progress. One way to realize that is not to call this function
1335*4882a593Smuzhiyun  * directly but to call srp_reconnect_rport() instead since that last function
1336*4882a593Smuzhiyun  * serializes calls of this function via rport->mutex and also blocks
1337*4882a593Smuzhiyun  * srp_queuecommand() calls before invoking this function.
1338*4882a593Smuzhiyun  */
1339*4882a593Smuzhiyun static int srp_rport_reconnect(struct srp_rport *rport)
1340*4882a593Smuzhiyun {
1341*4882a593Smuzhiyun 	struct srp_target_port *target = rport->lld_data;
1342*4882a593Smuzhiyun 	struct srp_rdma_ch *ch;
1343*4882a593Smuzhiyun 	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
1344*4882a593Smuzhiyun 						srp_use_imm_data,
1345*4882a593Smuzhiyun 						target->max_it_iu_size);
1346*4882a593Smuzhiyun 	int i, j, ret = 0;
1347*4882a593Smuzhiyun 	bool multich = false;
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	srp_disconnect_target(target);
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 	if (target->state == SRP_TARGET_SCANNING)
1352*4882a593Smuzhiyun 		return -ENODEV;
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	/*
1355*4882a593Smuzhiyun 	 * Now get a new local CM ID so that we avoid confusing the target in
1356*4882a593Smuzhiyun 	 * case things are really fouled up. Doing so also ensures that all CM
1357*4882a593Smuzhiyun 	 * callbacks will have finished before a new QP is allocated.
1358*4882a593Smuzhiyun 	 */
1359*4882a593Smuzhiyun 	for (i = 0; i < target->ch_count; i++) {
1360*4882a593Smuzhiyun 		ch = &target->ch[i];
1361*4882a593Smuzhiyun 		ret += srp_new_cm_id(ch);
1362*4882a593Smuzhiyun 	}
1363*4882a593Smuzhiyun 	for (i = 0; i < target->ch_count; i++) {
1364*4882a593Smuzhiyun 		ch = &target->ch[i];
1365*4882a593Smuzhiyun 		for (j = 0; j < target->req_ring_size; ++j) {
1366*4882a593Smuzhiyun 			struct srp_request *req = &ch->req_ring[j];
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 			srp_finish_req(ch, req, NULL, DID_RESET << 16);
1369*4882a593Smuzhiyun 		}
1370*4882a593Smuzhiyun 	}
1371*4882a593Smuzhiyun 	for (i = 0; i < target->ch_count; i++) {
1372*4882a593Smuzhiyun 		ch = &target->ch[i];
1373*4882a593Smuzhiyun 		/*
1374*4882a593Smuzhiyun 		 * Whether or not creating a new CM ID succeeded, create a new
1375*4882a593Smuzhiyun 		 * QP. This guarantees that all completion callback function
1376*4882a593Smuzhiyun 		 * invocations have finished before request resetting starts.
1377*4882a593Smuzhiyun 		 */
1378*4882a593Smuzhiyun 		ret += srp_create_ch_ib(ch);
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 		INIT_LIST_HEAD(&ch->free_tx);
1381*4882a593Smuzhiyun 		for (j = 0; j < target->queue_size; ++j)
1382*4882a593Smuzhiyun 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1383*4882a593Smuzhiyun 	}
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	target->qp_in_error = false;
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	for (i = 0; i < target->ch_count; i++) {
1388*4882a593Smuzhiyun 		ch = &target->ch[i];
1389*4882a593Smuzhiyun 		if (ret)
1390*4882a593Smuzhiyun 			break;
1391*4882a593Smuzhiyun 		ret = srp_connect_ch(ch, max_iu_len, multich);
1392*4882a593Smuzhiyun 		multich = true;
1393*4882a593Smuzhiyun 	}
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	if (ret == 0)
1396*4882a593Smuzhiyun 		shost_printk(KERN_INFO, target->scsi_host,
1397*4882a593Smuzhiyun 			     PFX "reconnect succeeded\n");
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	return ret;
1400*4882a593Smuzhiyun }
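/*
 * Note on the error accumulation above (restating the reconnect logic):
 * srp_new_cm_id() and srp_create_ch_ib() return 0 or a negative errno and
 * their results are summed into ret across all channels. Any nonzero
 * (negative) sum makes the connect loop break before srp_connect_ch() is
 * called, so a single failed channel fails the whole reconnect attempt.
 */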
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1403*4882a593Smuzhiyun 			 unsigned int dma_len, u32 rkey)
1404*4882a593Smuzhiyun {
1405*4882a593Smuzhiyun 	struct srp_direct_buf *desc = state->desc;
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	WARN_ON_ONCE(!dma_len);
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	desc->va = cpu_to_be64(dma_addr);
1410*4882a593Smuzhiyun 	desc->key = cpu_to_be32(rkey);
1411*4882a593Smuzhiyun 	desc->len = cpu_to_be32(dma_len);
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	state->total_len += dma_len;
1414*4882a593Smuzhiyun 	state->desc++;
1415*4882a593Smuzhiyun 	state->ndesc++;
1416*4882a593Smuzhiyun }
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
1419*4882a593Smuzhiyun {
1420*4882a593Smuzhiyun 	srp_handle_qp_err(cq, wc, "FAST REG");
1421*4882a593Smuzhiyun }
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun /*
1424*4882a593Smuzhiyun  * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1425*4882a593Smuzhiyun  * where to start in the first element. If sg_offset_p != NULL then
1426*4882a593Smuzhiyun  * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1427*4882a593Smuzhiyun  * byte that has not yet been mapped.
1428*4882a593Smuzhiyun  */
1429*4882a593Smuzhiyun static int srp_map_finish_fr(struct srp_map_state *state,
1430*4882a593Smuzhiyun 			     struct srp_request *req,
1431*4882a593Smuzhiyun 			     struct srp_rdma_ch *ch, int sg_nents,
1432*4882a593Smuzhiyun 			     unsigned int *sg_offset_p)
1433*4882a593Smuzhiyun {
1434*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1435*4882a593Smuzhiyun 	struct srp_device *dev = target->srp_host->srp_dev;
1436*4882a593Smuzhiyun 	struct ib_reg_wr wr;
1437*4882a593Smuzhiyun 	struct srp_fr_desc *desc;
1438*4882a593Smuzhiyun 	u32 rkey;
1439*4882a593Smuzhiyun 	int n, err;
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	if (state->fr.next >= state->fr.end) {
1442*4882a593Smuzhiyun 		shost_printk(KERN_ERR, ch->target->scsi_host,
1443*4882a593Smuzhiyun 			     PFX "Out of MRs (mr_per_cmd = %d)\n",
1444*4882a593Smuzhiyun 			     ch->target->mr_per_cmd);
1445*4882a593Smuzhiyun 		return -ENOMEM;
1446*4882a593Smuzhiyun 	}
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	WARN_ON_ONCE(!dev->use_fast_reg);
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	if (sg_nents == 1 && target->global_rkey) {
1451*4882a593Smuzhiyun 		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1454*4882a593Smuzhiyun 			     sg_dma_len(state->sg) - sg_offset,
1455*4882a593Smuzhiyun 			     target->global_rkey);
1456*4882a593Smuzhiyun 		if (sg_offset_p)
1457*4882a593Smuzhiyun 			*sg_offset_p = 0;
1458*4882a593Smuzhiyun 		return 1;
1459*4882a593Smuzhiyun 	}
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	desc = srp_fr_pool_get(ch->fr_pool);
1462*4882a593Smuzhiyun 	if (!desc)
1463*4882a593Smuzhiyun 		return -ENOMEM;
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	rkey = ib_inc_rkey(desc->mr->rkey);
1466*4882a593Smuzhiyun 	ib_update_fast_reg_key(desc->mr, rkey);
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
1469*4882a593Smuzhiyun 			 dev->mr_page_size);
1470*4882a593Smuzhiyun 	if (unlikely(n < 0)) {
1471*4882a593Smuzhiyun 		srp_fr_pool_put(ch->fr_pool, &desc, 1);
1472*4882a593Smuzhiyun 		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
1473*4882a593Smuzhiyun 			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
1474*4882a593Smuzhiyun 			 sg_offset_p ? *sg_offset_p : -1, n);
1475*4882a593Smuzhiyun 		return n;
1476*4882a593Smuzhiyun 	}
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	WARN_ON_ONCE(desc->mr->length == 0);
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	req->reg_cqe.done = srp_reg_mr_err_done;
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	wr.wr.next = NULL;
1483*4882a593Smuzhiyun 	wr.wr.opcode = IB_WR_REG_MR;
1484*4882a593Smuzhiyun 	wr.wr.wr_cqe = &req->reg_cqe;
1485*4882a593Smuzhiyun 	wr.wr.num_sge = 0;
1486*4882a593Smuzhiyun 	wr.wr.send_flags = 0;
1487*4882a593Smuzhiyun 	wr.mr = desc->mr;
1488*4882a593Smuzhiyun 	wr.key = desc->mr->rkey;
1489*4882a593Smuzhiyun 	wr.access = (IB_ACCESS_LOCAL_WRITE |
1490*4882a593Smuzhiyun 		     IB_ACCESS_REMOTE_READ |
1491*4882a593Smuzhiyun 		     IB_ACCESS_REMOTE_WRITE);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	*state->fr.next++ = desc;
1494*4882a593Smuzhiyun 	state->nmdesc++;
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	srp_map_desc(state, desc->mr->iova,
1497*4882a593Smuzhiyun 		     desc->mr->length, desc->mr->rkey);
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	err = ib_post_send(ch->qp, &wr.wr, NULL);
1500*4882a593Smuzhiyun 	if (unlikely(err)) {
1501*4882a593Smuzhiyun 		WARN_ON_ONCE(err == -ENOMEM);
1502*4882a593Smuzhiyun 		return err;
1503*4882a593Smuzhiyun 	}
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 	return n;
1506*4882a593Smuzhiyun }
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1509*4882a593Smuzhiyun 			 struct srp_request *req, struct scatterlist *scat,
1510*4882a593Smuzhiyun 			 int count)
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun 	unsigned int sg_offset = 0;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	state->fr.next = req->fr_list;
1515*4882a593Smuzhiyun 	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
1516*4882a593Smuzhiyun 	state->sg = scat;
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	if (count == 0)
1519*4882a593Smuzhiyun 		return 0;
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	while (count) {
1522*4882a593Smuzhiyun 		int i, n;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
1525*4882a593Smuzhiyun 		if (unlikely(n < 0))
1526*4882a593Smuzhiyun 			return n;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 		count -= n;
1529*4882a593Smuzhiyun 		for (i = 0; i < n; i++)
1530*4882a593Smuzhiyun 			state->sg = sg_next(state->sg);
1531*4882a593Smuzhiyun 	}
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	return 0;
1534*4882a593Smuzhiyun }
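/*
 * Sketch of how the loop above converges (the numbers are hypothetical):
 * with count == 5 scatterlist entries, a first srp_map_finish_fr() call
 * might register 3 entries in one MR (n == 3), leaving count == 2 and
 * state->sg advanced by three sg_next() steps; a second call then
 * registers the remaining 2 entries. A negative n aborts the mapping
 * immediately and is propagated to the caller.
 */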
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1537*4882a593Smuzhiyun 			  struct srp_request *req, struct scatterlist *scat,
1538*4882a593Smuzhiyun 			  int count)
1539*4882a593Smuzhiyun {
1540*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1541*4882a593Smuzhiyun 	struct scatterlist *sg;
1542*4882a593Smuzhiyun 	int i;
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	for_each_sg(scat, sg, count, i) {
1545*4882a593Smuzhiyun 		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
1546*4882a593Smuzhiyun 			     target->global_rkey);
1547*4882a593Smuzhiyun 	}
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	return 0;
1550*4882a593Smuzhiyun }
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun /*
1553*4882a593Smuzhiyun  * Register the indirect data buffer descriptor with the HCA.
1554*4882a593Smuzhiyun  *
1555*4882a593Smuzhiyun  * Note: since the indirect data buffer descriptor has been allocated with
1556*4882a593Smuzhiyun  * kmalloc() it is guaranteed that this buffer is a physically contiguous
1557*4882a593Smuzhiyun  * memory buffer.
1558*4882a593Smuzhiyun  */
1559*4882a593Smuzhiyun static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1560*4882a593Smuzhiyun 		       void **next_mr, void **end_mr, u32 idb_len,
1561*4882a593Smuzhiyun 		       __be32 *idb_rkey)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1564*4882a593Smuzhiyun 	struct srp_device *dev = target->srp_host->srp_dev;
1565*4882a593Smuzhiyun 	struct srp_map_state state;
1566*4882a593Smuzhiyun 	struct srp_direct_buf idb_desc;
1567*4882a593Smuzhiyun 	struct scatterlist idb_sg[1];
1568*4882a593Smuzhiyun 	int ret;
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	memset(&state, 0, sizeof(state));
1571*4882a593Smuzhiyun 	memset(&idb_desc, 0, sizeof(idb_desc));
1572*4882a593Smuzhiyun 	state.gen.next = next_mr;
1573*4882a593Smuzhiyun 	state.gen.end = end_mr;
1574*4882a593Smuzhiyun 	state.desc = &idb_desc;
1575*4882a593Smuzhiyun 	state.base_dma_addr = req->indirect_dma_addr;
1576*4882a593Smuzhiyun 	state.dma_len = idb_len;
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	if (dev->use_fast_reg) {
1579*4882a593Smuzhiyun 		state.sg = idb_sg;
1580*4882a593Smuzhiyun 		sg_init_one(idb_sg, req->indirect_desc, idb_len);
1581*4882a593Smuzhiyun 		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1582*4882a593Smuzhiyun #ifdef CONFIG_NEED_SG_DMA_LENGTH
1583*4882a593Smuzhiyun 		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
1584*4882a593Smuzhiyun #endif
1585*4882a593Smuzhiyun 		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
1586*4882a593Smuzhiyun 		if (ret < 0)
1587*4882a593Smuzhiyun 			return ret;
1588*4882a593Smuzhiyun 		WARN_ON_ONCE(ret < 1);
1589*4882a593Smuzhiyun 	} else {
1590*4882a593Smuzhiyun 		return -EINVAL;
1591*4882a593Smuzhiyun 	}
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	*idb_rkey = idb_desc.key;
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	return 0;
1596*4882a593Smuzhiyun }
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun static void srp_check_mapping(struct srp_map_state *state,
1599*4882a593Smuzhiyun 			      struct srp_rdma_ch *ch, struct srp_request *req,
1600*4882a593Smuzhiyun 			      struct scatterlist *scat, int count)
1601*4882a593Smuzhiyun {
1602*4882a593Smuzhiyun 	struct srp_device *dev = ch->target->srp_host->srp_dev;
1603*4882a593Smuzhiyun 	struct srp_fr_desc **pfr;
1604*4882a593Smuzhiyun 	u64 desc_len = 0, mr_len = 0;
1605*4882a593Smuzhiyun 	int i;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	for (i = 0; i < state->ndesc; i++)
1608*4882a593Smuzhiyun 		desc_len += be32_to_cpu(req->indirect_desc[i].len);
1609*4882a593Smuzhiyun 	if (dev->use_fast_reg)
1610*4882a593Smuzhiyun 		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
1611*4882a593Smuzhiyun 			mr_len += (*pfr)->mr->length;
1612*4882a593Smuzhiyun 	if (desc_len != scsi_bufflen(req->scmnd) ||
1613*4882a593Smuzhiyun 	    mr_len > scsi_bufflen(req->scmnd))
1614*4882a593Smuzhiyun 		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
1615*4882a593Smuzhiyun 		       scsi_bufflen(req->scmnd), desc_len, mr_len,
1616*4882a593Smuzhiyun 		       state->ndesc, state->nmdesc);
1617*4882a593Smuzhiyun }
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun /**
1620*4882a593Smuzhiyun  * srp_map_data() - map SCSI data buffer onto an SRP request
1621*4882a593Smuzhiyun  * @scmnd: SCSI command to map
1622*4882a593Smuzhiyun  * @ch: SRP RDMA channel
1623*4882a593Smuzhiyun  * @req: SRP request
1624*4882a593Smuzhiyun  *
1625*4882a593Smuzhiyun  * Returns the length in bytes of the SRP_CMD IU or a negative value if
1626*4882a593Smuzhiyun  * mapping failed. The size of any immediate data is not included in the
1627*4882a593Smuzhiyun  * return value.
1628*4882a593Smuzhiyun  */
1629*4882a593Smuzhiyun static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1630*4882a593Smuzhiyun 			struct srp_request *req)
1631*4882a593Smuzhiyun {
1632*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1633*4882a593Smuzhiyun 	struct scatterlist *scat, *sg;
1634*4882a593Smuzhiyun 	struct srp_cmd *cmd = req->cmd->buf;
1635*4882a593Smuzhiyun 	int i, len, nents, count, ret;
1636*4882a593Smuzhiyun 	struct srp_device *dev;
1637*4882a593Smuzhiyun 	struct ib_device *ibdev;
1638*4882a593Smuzhiyun 	struct srp_map_state state;
1639*4882a593Smuzhiyun 	struct srp_indirect_buf *indirect_hdr;
1640*4882a593Smuzhiyun 	u64 data_len;
1641*4882a593Smuzhiyun 	u32 idb_len, table_len;
1642*4882a593Smuzhiyun 	__be32 idb_rkey;
1643*4882a593Smuzhiyun 	u8 fmt;
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	req->cmd->num_sge = 1;
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1648*4882a593Smuzhiyun 		return sizeof(struct srp_cmd) + cmd->add_cdb_len;
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1651*4882a593Smuzhiyun 	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
1652*4882a593Smuzhiyun 		shost_printk(KERN_WARNING, target->scsi_host,
1653*4882a593Smuzhiyun 			     PFX "Unhandled data direction %d\n",
1654*4882a593Smuzhiyun 			     scmnd->sc_data_direction);
1655*4882a593Smuzhiyun 		return -EINVAL;
1656*4882a593Smuzhiyun 	}
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 	nents = scsi_sg_count(scmnd);
1659*4882a593Smuzhiyun 	scat  = scsi_sglist(scmnd);
1660*4882a593Smuzhiyun 	data_len = scsi_bufflen(scmnd);
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun 	dev = target->srp_host->srp_dev;
1663*4882a593Smuzhiyun 	ibdev = dev->dev;
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun 	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1666*4882a593Smuzhiyun 	if (unlikely(count == 0))
1667*4882a593Smuzhiyun 		return -EIO;
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun 	if (ch->use_imm_data &&
1670*4882a593Smuzhiyun 	    count <= ch->max_imm_sge &&
1671*4882a593Smuzhiyun 	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
1672*4882a593Smuzhiyun 	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
1673*4882a593Smuzhiyun 		struct srp_imm_buf *buf;
1674*4882a593Smuzhiyun 		struct ib_sge *sge = &req->cmd->sge[1];
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 		fmt = SRP_DATA_DESC_IMM;
1677*4882a593Smuzhiyun 		len = SRP_IMM_DATA_OFFSET;
1678*4882a593Smuzhiyun 		req->nmdesc = 0;
1679*4882a593Smuzhiyun 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1680*4882a593Smuzhiyun 		buf->len = cpu_to_be32(data_len);
1681*4882a593Smuzhiyun 		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
1682*4882a593Smuzhiyun 		for_each_sg(scat, sg, count, i) {
1683*4882a593Smuzhiyun 			sge[i].addr   = sg_dma_address(sg);
1684*4882a593Smuzhiyun 			sge[i].length = sg_dma_len(sg);
1685*4882a593Smuzhiyun 			sge[i].lkey   = target->lkey;
1686*4882a593Smuzhiyun 		}
1687*4882a593Smuzhiyun 		req->cmd->num_sge += count;
1688*4882a593Smuzhiyun 		goto map_complete;
1689*4882a593Smuzhiyun 	}
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	fmt = SRP_DATA_DESC_DIRECT;
1692*4882a593Smuzhiyun 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1693*4882a593Smuzhiyun 		sizeof(struct srp_direct_buf);
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 	if (count == 1 && target->global_rkey) {
1696*4882a593Smuzhiyun 		/*
1697*4882a593Smuzhiyun 		 * The midlayer only generated a single gather/scatter
1698*4882a593Smuzhiyun 		 * entry, or DMA mapping coalesced everything to a
1699*4882a593Smuzhiyun 		 * single entry.  So a direct descriptor along with
1700*4882a593Smuzhiyun 		 * the DMA MR suffices.
1701*4882a593Smuzhiyun 		 */
1702*4882a593Smuzhiyun 		struct srp_direct_buf *buf;
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1705*4882a593Smuzhiyun 		buf->va  = cpu_to_be64(sg_dma_address(scat));
1706*4882a593Smuzhiyun 		buf->key = cpu_to_be32(target->global_rkey);
1707*4882a593Smuzhiyun 		buf->len = cpu_to_be32(sg_dma_len(scat));
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 		req->nmdesc = 0;
1710*4882a593Smuzhiyun 		goto map_complete;
1711*4882a593Smuzhiyun 	}
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	/*
1714*4882a593Smuzhiyun 	 * We have more than one scatter/gather entry, so build our indirect
1715*4882a593Smuzhiyun 	 * descriptor table, trying to merge as many entries as we can.
1716*4882a593Smuzhiyun 	 */
1717*4882a593Smuzhiyun 	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1720*4882a593Smuzhiyun 				   target->indirect_size, DMA_TO_DEVICE);
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	memset(&state, 0, sizeof(state));
1723*4882a593Smuzhiyun 	state.desc = req->indirect_desc;
1724*4882a593Smuzhiyun 	if (dev->use_fast_reg)
1725*4882a593Smuzhiyun 		ret = srp_map_sg_fr(&state, ch, req, scat, count);
1726*4882a593Smuzhiyun 	else
1727*4882a593Smuzhiyun 		ret = srp_map_sg_dma(&state, ch, req, scat, count);
1728*4882a593Smuzhiyun 	req->nmdesc = state.nmdesc;
1729*4882a593Smuzhiyun 	if (ret < 0)
1730*4882a593Smuzhiyun 		goto unmap;
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	{
1733*4882a593Smuzhiyun 		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
1734*4882a593Smuzhiyun 			"Memory mapping consistency check");
1735*4882a593Smuzhiyun 		if (DYNAMIC_DEBUG_BRANCH(ddm))
1736*4882a593Smuzhiyun 			srp_check_mapping(&state, ch, req, scat, count);
1737*4882a593Smuzhiyun 	}
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	/* We've mapped the request, now pull as much of the indirect
1740*4882a593Smuzhiyun 	 * descriptor table as we can into the command buffer. If this
1741*4882a593Smuzhiyun 	 * target is not using an external indirect table, we are
1742*4882a593Smuzhiyun 	 * guaranteed to fit into the command, as the SCSI layer won't
1743*4882a593Smuzhiyun 	 * give us more S/G entries than we allow.
1744*4882a593Smuzhiyun 	 */
1745*4882a593Smuzhiyun 	if (state.ndesc == 1) {
1746*4882a593Smuzhiyun 		/*
1747*4882a593Smuzhiyun 		 * Memory registration collapsed the sg-list into one entry,
1748*4882a593Smuzhiyun 		 * so use a direct descriptor.
1749*4882a593Smuzhiyun 		 */
1750*4882a593Smuzhiyun 		struct srp_direct_buf *buf;
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 		buf = (void *)cmd->add_data + cmd->add_cdb_len;
1753*4882a593Smuzhiyun 		*buf = req->indirect_desc[0];
1754*4882a593Smuzhiyun 		goto map_complete;
1755*4882a593Smuzhiyun 	}
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun 	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1758*4882a593Smuzhiyun 						!target->allow_ext_sg)) {
1759*4882a593Smuzhiyun 		shost_printk(KERN_ERR, target->scsi_host,
1760*4882a593Smuzhiyun 			     "Could not fit S/G list into SRP_CMD\n");
1761*4882a593Smuzhiyun 		ret = -EIO;
1762*4882a593Smuzhiyun 		goto unmap;
1763*4882a593Smuzhiyun 	}
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	count = min(state.ndesc, target->cmd_sg_cnt);
1766*4882a593Smuzhiyun 	table_len = state.ndesc * sizeof (struct srp_direct_buf);
1767*4882a593Smuzhiyun 	idb_len = sizeof(struct srp_indirect_buf) + table_len;
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 	fmt = SRP_DATA_DESC_INDIRECT;
1770*4882a593Smuzhiyun 	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
1771*4882a593Smuzhiyun 		sizeof(struct srp_indirect_buf);
1772*4882a593Smuzhiyun 	len += count * sizeof (struct srp_direct_buf);
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 	memcpy(indirect_hdr->desc_list, req->indirect_desc,
1775*4882a593Smuzhiyun 	       count * sizeof (struct srp_direct_buf));
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	if (!target->global_rkey) {
1778*4882a593Smuzhiyun 		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1779*4882a593Smuzhiyun 				  idb_len, &idb_rkey);
1780*4882a593Smuzhiyun 		if (ret < 0)
1781*4882a593Smuzhiyun 			goto unmap;
1782*4882a593Smuzhiyun 		req->nmdesc++;
1783*4882a593Smuzhiyun 	} else {
1784*4882a593Smuzhiyun 		idb_rkey = cpu_to_be32(target->global_rkey);
1785*4882a593Smuzhiyun 	}
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1788*4882a593Smuzhiyun 	indirect_hdr->table_desc.key = idb_rkey;
1789*4882a593Smuzhiyun 	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1790*4882a593Smuzhiyun 	indirect_hdr->len = cpu_to_be32(state.total_len);
1791*4882a593Smuzhiyun 
1792*4882a593Smuzhiyun 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1793*4882a593Smuzhiyun 		cmd->data_out_desc_cnt = count;
1794*4882a593Smuzhiyun 	else
1795*4882a593Smuzhiyun 		cmd->data_in_desc_cnt = count;
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1798*4882a593Smuzhiyun 				      DMA_TO_DEVICE);
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun map_complete:
1801*4882a593Smuzhiyun 	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1802*4882a593Smuzhiyun 		cmd->buf_fmt = fmt << 4;
1803*4882a593Smuzhiyun 	else
1804*4882a593Smuzhiyun 		cmd->buf_fmt = fmt;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	return len;
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun unmap:
1809*4882a593Smuzhiyun 	srp_unmap_data(scmnd, ch, req);
1810*4882a593Smuzhiyun 	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
1811*4882a593Smuzhiyun 		ret = -E2BIG;
1812*4882a593Smuzhiyun 	return ret;
1813*4882a593Smuzhiyun }
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun /*
1816*4882a593Smuzhiyun  * Return an IU and possibly a credit to the free pool.
1817*4882a593Smuzhiyun  */
1818*4882a593Smuzhiyun static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1819*4882a593Smuzhiyun 			  enum srp_iu_type iu_type)
1820*4882a593Smuzhiyun {
1821*4882a593Smuzhiyun 	unsigned long flags;
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 	spin_lock_irqsave(&ch->lock, flags);
1824*4882a593Smuzhiyun 	list_add(&iu->list, &ch->free_tx);
1825*4882a593Smuzhiyun 	if (iu_type != SRP_IU_RSP)
1826*4882a593Smuzhiyun 		++ch->req_lim;
1827*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ch->lock, flags);
1828*4882a593Smuzhiyun }
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun /*
1831*4882a593Smuzhiyun  * Must be called with ch->lock held to protect req_lim and free_tx.
1832*4882a593Smuzhiyun  * If IU is not sent, it must be returned using srp_put_tx_iu().
1833*4882a593Smuzhiyun  *
1834*4882a593Smuzhiyun  * Note:
1835*4882a593Smuzhiyun  * An upper limit for the number of allocated information units for each
1836*4882a593Smuzhiyun  * request type is:
1837*4882a593Smuzhiyun  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1838*4882a593Smuzhiyun  *   more than Scsi_Host.can_queue requests.
1839*4882a593Smuzhiyun  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1840*4882a593Smuzhiyun  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1841*4882a593Smuzhiyun  *   one unanswered SRP request to an initiator.
1842*4882a593Smuzhiyun  */
1843*4882a593Smuzhiyun static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1844*4882a593Smuzhiyun 				      enum srp_iu_type iu_type)
1845*4882a593Smuzhiyun {
1846*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1847*4882a593Smuzhiyun 	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1848*4882a593Smuzhiyun 	struct srp_iu *iu;
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	lockdep_assert_held(&ch->lock);
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 	ib_process_cq_direct(ch->send_cq, -1);
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 	if (list_empty(&ch->free_tx))
1855*4882a593Smuzhiyun 		return NULL;
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 	/* Initiator responses to target requests do not consume credits */
1858*4882a593Smuzhiyun 	if (iu_type != SRP_IU_RSP) {
1859*4882a593Smuzhiyun 		if (ch->req_lim <= rsv) {
1860*4882a593Smuzhiyun 			++target->zero_req_lim;
1861*4882a593Smuzhiyun 			return NULL;
1862*4882a593Smuzhiyun 		}
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 		--ch->req_lim;
1865*4882a593Smuzhiyun 	}
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1868*4882a593Smuzhiyun 	list_del(&iu->list);
1869*4882a593Smuzhiyun 	return iu;
1870*4882a593Smuzhiyun }
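/*
 * Credit accounting example (the values are illustrative): with rsv ==
 * SRP_TSK_MGMT_SQ_SIZE slots reserved, a SRP_IU_CMD allocation is refused
 * once ch->req_lim drops to rsv, while a SRP_IU_TSK_MGMT allocation
 * (rsv == 0) still succeeds as long as ch->req_lim > 0. SRP_IU_RSP
 * allocations bypass the check entirely because initiator responses do
 * not consume credits.
 */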
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun /*
1873*4882a593Smuzhiyun  * Note: if this function is called from inside ib_drain_sq() then it will
1874*4882a593Smuzhiyun  * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1875*4882a593Smuzhiyun  * with status IB_WC_SUCCESS then that's a bug.
1876*4882a593Smuzhiyun  */
1877*4882a593Smuzhiyun static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
1878*4882a593Smuzhiyun {
1879*4882a593Smuzhiyun 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1880*4882a593Smuzhiyun 	struct srp_rdma_ch *ch = cq->cq_context;
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
1883*4882a593Smuzhiyun 		srp_handle_qp_err(cq, wc, "SEND");
1884*4882a593Smuzhiyun 		return;
1885*4882a593Smuzhiyun 	}
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	lockdep_assert_held(&ch->lock);
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun 	list_add(&iu->list, &ch->free_tx);
1890*4882a593Smuzhiyun }
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun /**
1893*4882a593Smuzhiyun  * srp_post_send() - send an SRP information unit
1894*4882a593Smuzhiyun  * @ch: RDMA channel over which to send the information unit.
1895*4882a593Smuzhiyun  * @iu: Information unit to send.
1896*4882a593Smuzhiyun  * @len: Length of the information unit excluding immediate data.
1897*4882a593Smuzhiyun  */
1898*4882a593Smuzhiyun static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1899*4882a593Smuzhiyun {
1900*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1901*4882a593Smuzhiyun 	struct ib_send_wr wr;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1904*4882a593Smuzhiyun 		return -EINVAL;
1905*4882a593Smuzhiyun 
1906*4882a593Smuzhiyun 	iu->sge[0].addr   = iu->dma;
1907*4882a593Smuzhiyun 	iu->sge[0].length = len;
1908*4882a593Smuzhiyun 	iu->sge[0].lkey   = target->lkey;
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 	iu->cqe.done = srp_send_done;
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 	wr.next       = NULL;
1913*4882a593Smuzhiyun 	wr.wr_cqe     = &iu->cqe;
1914*4882a593Smuzhiyun 	wr.sg_list    = &iu->sge[0];
1915*4882a593Smuzhiyun 	wr.num_sge    = iu->num_sge;
1916*4882a593Smuzhiyun 	wr.opcode     = IB_WR_SEND;
1917*4882a593Smuzhiyun 	wr.send_flags = IB_SEND_SIGNALED;
1918*4882a593Smuzhiyun 
1919*4882a593Smuzhiyun 	return ib_post_send(ch->qp, &wr, NULL);
1920*4882a593Smuzhiyun }
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1923*4882a593Smuzhiyun {
1924*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1925*4882a593Smuzhiyun 	struct ib_recv_wr wr;
1926*4882a593Smuzhiyun 	struct ib_sge list;
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	list.addr   = iu->dma;
1929*4882a593Smuzhiyun 	list.length = iu->size;
1930*4882a593Smuzhiyun 	list.lkey   = target->lkey;
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun 	iu->cqe.done = srp_recv_done;
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 	wr.next     = NULL;
1935*4882a593Smuzhiyun 	wr.wr_cqe   = &iu->cqe;
1936*4882a593Smuzhiyun 	wr.sg_list  = &list;
1937*4882a593Smuzhiyun 	wr.num_sge  = 1;
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun 	return ib_post_recv(ch->qp, &wr, NULL);
1940*4882a593Smuzhiyun }
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1943*4882a593Smuzhiyun {
1944*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
1945*4882a593Smuzhiyun 	struct srp_request *req;
1946*4882a593Smuzhiyun 	struct scsi_cmnd *scmnd;
1947*4882a593Smuzhiyun 	unsigned long flags;
1948*4882a593Smuzhiyun 
1949*4882a593Smuzhiyun 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1950*4882a593Smuzhiyun 		spin_lock_irqsave(&ch->lock, flags);
1951*4882a593Smuzhiyun 		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1952*4882a593Smuzhiyun 		if (rsp->tag == ch->tsk_mgmt_tag) {
1953*4882a593Smuzhiyun 			ch->tsk_mgmt_status = -1;
1954*4882a593Smuzhiyun 			if (be32_to_cpu(rsp->resp_data_len) >= 4)
1955*4882a593Smuzhiyun 				ch->tsk_mgmt_status = rsp->data[3];
1956*4882a593Smuzhiyun 			complete(&ch->tsk_mgmt_done);
1957*4882a593Smuzhiyun 		} else {
1958*4882a593Smuzhiyun 			shost_printk(KERN_ERR, target->scsi_host,
1959*4882a593Smuzhiyun 				     "Received tsk mgmt response too late for tag %#llx\n",
1960*4882a593Smuzhiyun 				     rsp->tag);
1961*4882a593Smuzhiyun 		}
1962*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ch->lock, flags);
1963*4882a593Smuzhiyun 	} else {
1964*4882a593Smuzhiyun 		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1965*4882a593Smuzhiyun 		if (scmnd && scmnd->host_scribble) {
1966*4882a593Smuzhiyun 			req = (void *)scmnd->host_scribble;
1967*4882a593Smuzhiyun 			scmnd = srp_claim_req(ch, req, NULL, scmnd);
1968*4882a593Smuzhiyun 		} else {
1969*4882a593Smuzhiyun 			scmnd = NULL;
1970*4882a593Smuzhiyun 		}
1971*4882a593Smuzhiyun 		if (!scmnd) {
1972*4882a593Smuzhiyun 			shost_printk(KERN_ERR, target->scsi_host,
1973*4882a593Smuzhiyun 				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1974*4882a593Smuzhiyun 				     rsp->tag, ch - target->ch, ch->qp->qp_num);
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 			spin_lock_irqsave(&ch->lock, flags);
1977*4882a593Smuzhiyun 			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1978*4882a593Smuzhiyun 			spin_unlock_irqrestore(&ch->lock, flags);
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 			return;
1981*4882a593Smuzhiyun 		}
1982*4882a593Smuzhiyun 		scmnd->result = rsp->status;
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1985*4882a593Smuzhiyun 			memcpy(scmnd->sense_buffer, rsp->data +
1986*4882a593Smuzhiyun 			       be32_to_cpu(rsp->resp_data_len),
1987*4882a593Smuzhiyun 			       min_t(int, be32_to_cpu(rsp->sense_data_len),
1988*4882a593Smuzhiyun 				     SCSI_SENSE_BUFFERSIZE));
1989*4882a593Smuzhiyun 		}
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1992*4882a593Smuzhiyun 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1993*4882a593Smuzhiyun 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1994*4882a593Smuzhiyun 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1995*4882a593Smuzhiyun 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1996*4882a593Smuzhiyun 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1997*4882a593Smuzhiyun 		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1998*4882a593Smuzhiyun 			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 		srp_free_req(ch, req, scmnd,
2001*4882a593Smuzhiyun 			     be32_to_cpu(rsp->req_lim_delta));
2002*4882a593Smuzhiyun 
2003*4882a593Smuzhiyun 		scmnd->host_scribble = NULL;
2004*4882a593Smuzhiyun 		scmnd->scsi_done(scmnd);
2005*4882a593Smuzhiyun 	}
2006*4882a593Smuzhiyun }
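/*
 * Residual count convention used above (example values are illustrative):
 * for a 4096-byte READ of which the target returned only 512 bytes, the
 * target sets SRP_RSP_FLAG_DIUNDER with data_in_res_cnt == 3584 and
 * scsi_set_resid() records +3584; for the *OVER flags the same residual
 * is recorded negated, which is how this driver distinguishes overflow
 * from underflow toward the SCSI midlayer.
 */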
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
2009*4882a593Smuzhiyun 			       void *rsp, int len)
2010*4882a593Smuzhiyun {
2011*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
2012*4882a593Smuzhiyun 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2013*4882a593Smuzhiyun 	unsigned long flags;
2014*4882a593Smuzhiyun 	struct srp_iu *iu;
2015*4882a593Smuzhiyun 	int err;
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun 	spin_lock_irqsave(&ch->lock, flags);
2018*4882a593Smuzhiyun 	ch->req_lim += req_delta;
2019*4882a593Smuzhiyun 	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2020*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ch->lock, flags);
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun 	if (!iu) {
2023*4882a593Smuzhiyun 		shost_printk(KERN_ERR, target->scsi_host, PFX
2024*4882a593Smuzhiyun 			     "no IU available to send response\n");
2025*4882a593Smuzhiyun 		return 1;
2026*4882a593Smuzhiyun 	}
2027*4882a593Smuzhiyun 
2028*4882a593Smuzhiyun 	iu->num_sge = 1;
2029*4882a593Smuzhiyun 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2030*4882a593Smuzhiyun 	memcpy(iu->buf, rsp, len);
2031*4882a593Smuzhiyun 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 	err = srp_post_send(ch, iu, len);
2034*4882a593Smuzhiyun 	if (err) {
2035*4882a593Smuzhiyun 		shost_printk(KERN_ERR, target->scsi_host, PFX
2036*4882a593Smuzhiyun 			     "unable to post response: %d\n", err);
2037*4882a593Smuzhiyun 		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
2038*4882a593Smuzhiyun 	}
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun 	return err;
2041*4882a593Smuzhiyun }
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun static void srp_process_cred_req(struct srp_rdma_ch *ch,
2044*4882a593Smuzhiyun 				 struct srp_cred_req *req)
2045*4882a593Smuzhiyun {
2046*4882a593Smuzhiyun 	struct srp_cred_rsp rsp = {
2047*4882a593Smuzhiyun 		.opcode = SRP_CRED_RSP,
2048*4882a593Smuzhiyun 		.tag = req->tag,
2049*4882a593Smuzhiyun 	};
2050*4882a593Smuzhiyun 	s32 delta = be32_to_cpu(req->req_lim_delta);
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2053*4882a593Smuzhiyun 		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
2054*4882a593Smuzhiyun 			     "problems processing SRP_CRED_REQ\n");
2055*4882a593Smuzhiyun }
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun static void srp_process_aer_req(struct srp_rdma_ch *ch,
2058*4882a593Smuzhiyun 				struct srp_aer_req *req)
2059*4882a593Smuzhiyun {
2060*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
2061*4882a593Smuzhiyun 	struct srp_aer_rsp rsp = {
2062*4882a593Smuzhiyun 		.opcode = SRP_AER_RSP,
2063*4882a593Smuzhiyun 		.tag = req->tag,
2064*4882a593Smuzhiyun 	};
2065*4882a593Smuzhiyun 	s32 delta = be32_to_cpu(req->req_lim_delta);
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun 	shost_printk(KERN_ERR, target->scsi_host, PFX
2068*4882a593Smuzhiyun 		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
2071*4882a593Smuzhiyun 		shost_printk(KERN_ERR, target->scsi_host, PFX
2072*4882a593Smuzhiyun 			     "problems processing SRP_AER_REQ\n");
2073*4882a593Smuzhiyun }
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2076*4882a593Smuzhiyun {
2077*4882a593Smuzhiyun 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2078*4882a593Smuzhiyun 	struct srp_rdma_ch *ch = cq->cq_context;
2079*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
2080*4882a593Smuzhiyun 	struct ib_device *dev = target->srp_host->srp_dev->dev;
2081*4882a593Smuzhiyun 	int res;
2082*4882a593Smuzhiyun 	u8 opcode;
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
2085*4882a593Smuzhiyun 		srp_handle_qp_err(cq, wc, "RECV");
2086*4882a593Smuzhiyun 		return;
2087*4882a593Smuzhiyun 	}
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2090*4882a593Smuzhiyun 				   DMA_FROM_DEVICE);
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	opcode = *(u8 *) iu->buf;
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun 	if (0) {
2095*4882a593Smuzhiyun 		shost_printk(KERN_ERR, target->scsi_host,
2096*4882a593Smuzhiyun 			     PFX "recv completion, opcode 0x%02x\n", opcode);
2097*4882a593Smuzhiyun 		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
2098*4882a593Smuzhiyun 			       iu->buf, wc->byte_len, true);
2099*4882a593Smuzhiyun 	}
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 	switch (opcode) {
2102*4882a593Smuzhiyun 	case SRP_RSP:
2103*4882a593Smuzhiyun 		srp_process_rsp(ch, iu->buf);
2104*4882a593Smuzhiyun 		break;
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	case SRP_CRED_REQ:
2107*4882a593Smuzhiyun 		srp_process_cred_req(ch, iu->buf);
2108*4882a593Smuzhiyun 		break;
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 	case SRP_AER_REQ:
2111*4882a593Smuzhiyun 		srp_process_aer_req(ch, iu->buf);
2112*4882a593Smuzhiyun 		break;
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 	case SRP_T_LOGOUT:
2115*4882a593Smuzhiyun 		/* XXX Handle target logout */
2116*4882a593Smuzhiyun 		shost_printk(KERN_WARNING, target->scsi_host,
2117*4882a593Smuzhiyun 			     PFX "Got target logout request\n");
2118*4882a593Smuzhiyun 		break;
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	default:
2121*4882a593Smuzhiyun 		shost_printk(KERN_WARNING, target->scsi_host,
2122*4882a593Smuzhiyun 			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
2123*4882a593Smuzhiyun 		break;
2124*4882a593Smuzhiyun 	}
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2127*4882a593Smuzhiyun 				      DMA_FROM_DEVICE);
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 	res = srp_post_recv(ch, iu);
2130*4882a593Smuzhiyun 	if (res != 0)
2131*4882a593Smuzhiyun 		shost_printk(KERN_ERR, target->scsi_host,
2132*4882a593Smuzhiyun 			     PFX "Recv failed with error code %d\n", res);
2133*4882a593Smuzhiyun }
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun /**
2136*4882a593Smuzhiyun  * srp_tl_err_work() - handle a transport layer error
2137*4882a593Smuzhiyun  * @work: Work structure embedded in an SRP target port.
2138*4882a593Smuzhiyun  *
2139*4882a593Smuzhiyun  * Note: This function may get invoked before the rport has been created,
2140*4882a593Smuzhiyun  * hence the target->rport test.
2141*4882a593Smuzhiyun  */
2142*4882a593Smuzhiyun static void srp_tl_err_work(struct work_struct *work)
2143*4882a593Smuzhiyun {
2144*4882a593Smuzhiyun 	struct srp_target_port *target;
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun 	target = container_of(work, struct srp_target_port, tl_err_work);
2147*4882a593Smuzhiyun 	if (target->rport)
2148*4882a593Smuzhiyun 		srp_start_tl_fail_timers(target->rport);
2149*4882a593Smuzhiyun }
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2152*4882a593Smuzhiyun 		const char *opname)
2153*4882a593Smuzhiyun {
2154*4882a593Smuzhiyun 	struct srp_rdma_ch *ch = cq->cq_context;
2155*4882a593Smuzhiyun 	struct srp_target_port *target = ch->target;
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun 	if (ch->connected && !target->qp_in_error) {
2158*4882a593Smuzhiyun 		shost_printk(KERN_ERR, target->scsi_host,
2159*4882a593Smuzhiyun 			     PFX "failed %s status %s (%d) for CQE %p\n",
2160*4882a593Smuzhiyun 			     opname, ib_wc_status_msg(wc->status), wc->status,
2161*4882a593Smuzhiyun 			     wc->wr_cqe);
2162*4882a593Smuzhiyun 		queue_work(system_long_wq, &target->tl_err_work);
2163*4882a593Smuzhiyun 	}
2164*4882a593Smuzhiyun 	target->qp_in_error = true;
2165*4882a593Smuzhiyun }
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2168*4882a593Smuzhiyun {
2169*4882a593Smuzhiyun 	struct srp_target_port *target = host_to_target(shost);
2170*4882a593Smuzhiyun 	struct srp_rdma_ch *ch;
2171*4882a593Smuzhiyun 	struct srp_request *req;
2172*4882a593Smuzhiyun 	struct srp_iu *iu;
2173*4882a593Smuzhiyun 	struct srp_cmd *cmd;
2174*4882a593Smuzhiyun 	struct ib_device *dev;
2175*4882a593Smuzhiyun 	unsigned long flags;
2176*4882a593Smuzhiyun 	u32 tag;
2177*4882a593Smuzhiyun 	u16 idx;
2178*4882a593Smuzhiyun 	int len, ret;
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 	scmnd->result = srp_chkready(target->rport);
2181*4882a593Smuzhiyun 	if (unlikely(scmnd->result))
2182*4882a593Smuzhiyun 		goto err;
2183*4882a593Smuzhiyun 
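	/*
	 * The block-layer tag encodes both the hardware queue and the
	 * per-queue tag. Illustrative decomposition (assuming the usual
	 * blk-mq packing of the hwq index in the upper and the tag in the
	 * lower 16 bits): a unique tag of 0x0002000a selects
	 * ch == &target->ch[2] and idx == 10 in that channel's request ring.
	 */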
	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag    = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
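	/*
	 * CDBs that do not fit in the fixed-size cdb[] field of struct
	 * srp_cmd are carried in the additional CDB area, whose length is
	 * expressed in multiples of four bytes in the SRP_CMD format.
	 */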
	if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
		cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
					    4);
		if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
			goto err_iu;
	}

	req->scmnd    = scmnd;
	req->cmd      = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		scmnd->result = DID_ERROR << 16;
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Clear req->scmnd so that the loops that iterate over the request
	 * ring do not encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	return ret;
}

/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_it_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
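	/*
	 * srp_free_iu() ignores NULL pointers, so it is safe to walk the
	 * complete rings even when only part of them has been allocated.
	 */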
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}

static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
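	/*
	 * Example: with qp_attr->timeout == 14 and retry_cnt == 7, T_tr_ns is
	 * 4096 * 2^14 ns ~= 67 ms, the worst-case completion time is about
	 * 7 * 4 * 67 ms ~= 1.9 s and hence rq_tmo_jiffies corresponds to
	 * roughly 2.9 seconds.
	 */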
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}

static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret = 0;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
		ch->use_imm_data  = srp_use_imm_data &&
			(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
		ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
						      ch->use_imm_data,
						      target->max_it_iu_size);
		WARN_ON_ONCE(ch->max_it_iu_len >
			     be32_to_cpu(lrsp->max_it_iu_len));

		if (ch->use_imm_data)
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "using immediate data\n");

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error;
	}

	if (!target->using_rdma_cm) {
		ret = -ENOMEM;
		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
		if (!qp_attr)
			goto error;

		qp_attr->qp_state = IB_QPS_RTR;
		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (ret)
			goto error_free;

		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
		if (ret)
			goto error_free;

		qp_attr->qp_state = IB_QPS_RTS;
		ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (ret)
			goto error_free;

		target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

		ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
		if (ret)
			goto error_free;

		ret = ib_send_cm_rtu(cm_id, NULL, 0);
	}

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}

static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
				  const struct ib_cm_event *event,
				  struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;
	u16 dlid;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		dlid = be16_to_cpu(cpi->redirect_lid);
		sa_path_set_dlid(&ch->ib_cm.path, dlid);
		ch->ib_cm.path.pkey = cpi->redirect_pkey;
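		/* A QP number is only 24 bits wide. */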
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			union ib_gid *dgid = &ch->ib_cm.path.dgid;

			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(dgid->global.subnet_prefix),
				     be64_to_cpu(dgid->global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->ib_cm.orig_dgid.raw,
					     reason);
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		}
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
}

static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_ib_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;

		ch->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
				    struct rdma_cm_event *event)
{
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	int opcode;

	switch (event->status) {
	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			    "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->param.conn.private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej =
				(struct srp_login_rej *)
				event->param.conn.private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					    PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
				     opcode);
		}
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->status);
		ch->status = -ECONNRESET;
		break;
	}
}

static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event)
{
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		ch->status = 0;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ADDR_ERROR:
		ch->status = -ENXIO;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ch->status = 0;
		comp = 1;
		break;

	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		ch->status = -EHOSTUNREACH;
		comp = 1;
		break;

	case RDMA_CM_EVENT_CONNECT_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		comp = 1;
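		/*
		 * The RDMA CM performs the RTR/RTS queue pair transitions
		 * itself, so srp_cm_rep_handler() needs no ib_cm_id here.
		 */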
		srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
		break;

	case RDMA_CM_EVENT_REJECTED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_rdma_cm_rej_handler(ch, event);
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (ch->connected) {
			shost_printk(KERN_WARNING, target->scsi_host,
				     PFX "received DREQ\n");
			rdma_disconnect(ch->rdma_cm.cm_id);
			comp = 1;
			ch->status = 0;
			queue_work(system_long_wq, &target->tl_err_work);
		}
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		comp = 1;
		ch->status = 0;
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
}

/**
 * srp_change_queue_depth - set the device queue depth
 * @sdev: SCSI device struct
 * @qdepth: requested queue depth
 *
 * Returns the new queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}

static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func, u8 *status)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;
	int res;

	if (!ch->connected || target->qp_in_error)
		return -1;

	/*
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	iu->num_sge = 1;

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof(*tsk_mgmt),
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof(*tsk_mgmt));

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	int_to_scsilun(lun, &tsk_mgmt->lun);
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

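	/*
	 * Task management requests use a separate tag space: setting the
	 * SRP_TAG_TSK_MGMT bit keeps these tags from colliding with the
	 * block layer tags of regular SCSI commands.
	 */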
	spin_lock_irq(&ch->lock);
	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tag = ch->tsk_mgmt_tag;
	spin_unlock_irq(&ch->lock);

	init_completion(&ch->tsk_mgmt_done);

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof(*tsk_mgmt),
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
					msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
	if (res > 0 && status)
		*status = ch->tsk_mgmt_status;
	mutex_unlock(&rport->mutex);

	WARN_ON_ONCE(res < 0);

	return res > 0 ? 0 : -1;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

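	/*
	 * The command may already have completed by the time the error
	 * handler runs, in which case there is nothing left to abort.
	 */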
	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK, NULL) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	if (ret == SUCCESS) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = DID_ABORT << 16;
		scmnd->scsi_done(scmnd);
	}

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	u8 status;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET, &status))
		return FAILED;
	if (status)
		return FAILED;

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}

static int srp_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct srp_target_port *target = host_to_target(shost);

	if (target->target_can_queue)
		starget->can_queue = target->target_can_queue;
	return 0;
}

static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

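	/*
	 * Make sure the block layer does not time out requests before an
	 * error completion can be generated for them; see also
	 * srp_compute_rq_tmo().
	 */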
	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->using_rdma_cm)
		return -ENOENT;
	return sprintf(buf, "0x%016llx\n",
		       be64_to_cpu(target->ib_cm.service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->using_rdma_cm)
		return -ENOENT;
	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	if (target->using_rdma_cm)
		return -ENOENT;
	return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->using_rdma_cm)
		return -ENOENT;
	return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n",
		       dev_name(&target->srp_host->srp_dev->dev->dev));
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}

static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.target_alloc			= srp_target_alloc,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth             = srp_change_queue_depth,
	.eh_timed_out			= srp_timed_out,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.shost_attrs			= srp_host_attrs,
	.track_queue_depth		= 1,
};

static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}

/*
 * Return values:
 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
 *    removal has been scheduled.
 * 0 and target->state != SRP_TARGET_REMOVED upon success.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

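	/*
	 * Only transition to SRP_TARGET_LIVE if no concurrent removal has
	 * changed the target state while the SCSI bus was being scanned.
	 */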
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};

/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 * or
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
 *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
 *
 * to the add_target sysfs attribute.
 */
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_IP_SRC		= 1 << 15,
	SRP_OPT_IP_DEST		= 1 << 16,
	SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
	SRP_OPT_MAX_IT_IU_SIZE  = 1 << 18,
	SRP_OPT_CH_COUNT	= 1 << 19,
};

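/*
 * One of the two mandatory option sets below has to be present in full:
 * either the IB CM parameters (dgid, pkey and service_id) or the RDMA CM
 * destination address (dest), in both cases together with id_ext and
 * ioc_guid. See also the comment above that documents the two accepted
 * add_target formats.
 */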
static unsigned int srp_opt_mandatory[] = {
	SRP_OPT_ID_EXT		|
	SRP_OPT_IOC_GUID	|
	SRP_OPT_DGID		|
	SRP_OPT_PKEY		|
	SRP_OPT_SERVICE_ID,
	SRP_OPT_ID_EXT		|
	SRP_OPT_IOC_GUID	|
	SRP_OPT_IP_DEST,
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_TARGET_CAN_QUEUE,	"target_can_queue=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_IP_SRC,		"src=%s"		},
	{ SRP_OPT_IP_DEST,		"dest=%s"		},
	{ SRP_OPT_MAX_IT_IU_SIZE,	"max_it_iu_size=%d"	},
	{ SRP_OPT_CH_COUNT,		"ch_count=%u",		},
	{ SRP_OPT_ERR,			NULL			}
};

/**
 * srp_parse_in - parse an IP address and port number combination
 * @net:	   [in]  Network namespace.
 * @sa:		   [out] Address family, IP address and port number.
 * @addr_port_str: [in]  IP address and port number.
 * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
 *
 * Parse the following address formats:
 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
 */
static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
			const char *addr_port_str, bool *has_port)
{
	char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
	char *port_str;
	int ret;

	if (!addr)
		return -ENOMEM;
	port_str = strrchr(addr, ':');
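	/*
	 * A colon that is followed by ']' is part of a bracketed IPv6
	 * address rather than a port number separator, e.g. "[1::2:3]".
	 */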
3303*4882a593Smuzhiyun 	if (port_str && strchr(port_str, ']'))
3304*4882a593Smuzhiyun 		port_str = NULL;
3305*4882a593Smuzhiyun 	if (port_str)
3306*4882a593Smuzhiyun 		*port_str++ = '\0';
3307*4882a593Smuzhiyun 	if (has_port)
3308*4882a593Smuzhiyun 		*has_port = port_str != NULL;
3309*4882a593Smuzhiyun 	ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3310*4882a593Smuzhiyun 	if (ret && addr[0]) {
3311*4882a593Smuzhiyun 		addr_end = addr + strlen(addr) - 1;
3312*4882a593Smuzhiyun 		if (addr[0] == '[' && *addr_end == ']') {
3313*4882a593Smuzhiyun 			*addr_end = '\0';
3314*4882a593Smuzhiyun 			ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3315*4882a593Smuzhiyun 						   port_str, sa);
3316*4882a593Smuzhiyun 		}
3317*4882a593Smuzhiyun 	}
3318*4882a593Smuzhiyun 	kfree(addr);
3319*4882a593Smuzhiyun 	pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3320*4882a593Smuzhiyun 	return ret;
3321*4882a593Smuzhiyun }
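
/*
 * Worked examples for srp_parse_in() (addresses illustrative):
 * - "10.0.0.2:5555": the last ':' splits off port_str = "5555" and the
 *   AF_INET attempt succeeds.
 * - "[fe80::1%2]:5555": the AF_INET attempt fails, the surrounding
 *   brackets are stripped and the AF_INET6 attempt parses the address
 *   together with its %2 scope id.
 * - "[fe80::1]": the last ':' is followed by ']', so port_str stays NULL
 *   and *has_port is reported as false.
 */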
3322*4882a593Smuzhiyun 
3323*4882a593Smuzhiyun static int srp_parse_options(struct net *net, const char *buf,
3324*4882a593Smuzhiyun 			     struct srp_target_port *target)
3325*4882a593Smuzhiyun {
3326*4882a593Smuzhiyun 	char *options, *sep_opt;
3327*4882a593Smuzhiyun 	char *p;
3328*4882a593Smuzhiyun 	substring_t args[MAX_OPT_ARGS];
3329*4882a593Smuzhiyun 	unsigned long long ull;
3330*4882a593Smuzhiyun 	bool has_port;
3331*4882a593Smuzhiyun 	int opt_mask = 0;
3332*4882a593Smuzhiyun 	int token;
3333*4882a593Smuzhiyun 	int ret = -EINVAL;
3334*4882a593Smuzhiyun 	int i;
3335*4882a593Smuzhiyun 
3336*4882a593Smuzhiyun 	options = kstrdup(buf, GFP_KERNEL);
3337*4882a593Smuzhiyun 	if (!options)
3338*4882a593Smuzhiyun 		return -ENOMEM;
3339*4882a593Smuzhiyun 
3340*4882a593Smuzhiyun 	sep_opt = options;
3341*4882a593Smuzhiyun 	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3342*4882a593Smuzhiyun 		if (!*p)
3343*4882a593Smuzhiyun 			continue;
3344*4882a593Smuzhiyun 
3345*4882a593Smuzhiyun 		token = match_token(p, srp_opt_tokens, args);
3346*4882a593Smuzhiyun 		opt_mask |= token;
3347*4882a593Smuzhiyun 
3348*4882a593Smuzhiyun 		switch (token) {
3349*4882a593Smuzhiyun 		case SRP_OPT_ID_EXT:
3350*4882a593Smuzhiyun 			p = match_strdup(args);
3351*4882a593Smuzhiyun 			if (!p) {
3352*4882a593Smuzhiyun 				ret = -ENOMEM;
3353*4882a593Smuzhiyun 				goto out;
3354*4882a593Smuzhiyun 			}
3355*4882a593Smuzhiyun 			ret = kstrtoull(p, 16, &ull);
3356*4882a593Smuzhiyun 			if (ret) {
3357*4882a593Smuzhiyun 				pr_warn("invalid id_ext parameter '%s'\n", p);
3358*4882a593Smuzhiyun 				kfree(p);
3359*4882a593Smuzhiyun 				goto out;
3360*4882a593Smuzhiyun 			}
3361*4882a593Smuzhiyun 			target->id_ext = cpu_to_be64(ull);
3362*4882a593Smuzhiyun 			kfree(p);
3363*4882a593Smuzhiyun 			break;
3364*4882a593Smuzhiyun 
3365*4882a593Smuzhiyun 		case SRP_OPT_IOC_GUID:
3366*4882a593Smuzhiyun 			p = match_strdup(args);
3367*4882a593Smuzhiyun 			if (!p) {
3368*4882a593Smuzhiyun 				ret = -ENOMEM;
3369*4882a593Smuzhiyun 				goto out;
3370*4882a593Smuzhiyun 			}
3371*4882a593Smuzhiyun 			ret = kstrtoull(p, 16, &ull);
3372*4882a593Smuzhiyun 			if (ret) {
3373*4882a593Smuzhiyun 				pr_warn("invalid ioc_guid parameter '%s'\n", p);
3374*4882a593Smuzhiyun 				kfree(p);
3375*4882a593Smuzhiyun 				goto out;
3376*4882a593Smuzhiyun 			}
3377*4882a593Smuzhiyun 			target->ioc_guid = cpu_to_be64(ull);
3378*4882a593Smuzhiyun 			kfree(p);
3379*4882a593Smuzhiyun 			break;
3380*4882a593Smuzhiyun 
3381*4882a593Smuzhiyun 		case SRP_OPT_DGID:
3382*4882a593Smuzhiyun 			p = match_strdup(args);
3383*4882a593Smuzhiyun 			if (!p) {
3384*4882a593Smuzhiyun 				ret = -ENOMEM;
3385*4882a593Smuzhiyun 				goto out;
3386*4882a593Smuzhiyun 			}
3387*4882a593Smuzhiyun 			if (strlen(p) != 32) {
3388*4882a593Smuzhiyun 				pr_warn("bad dest GID parameter '%s'\n", p);
3389*4882a593Smuzhiyun 				kfree(p);
				ret = -EINVAL;
3390*4882a593Smuzhiyun 				goto out;
3391*4882a593Smuzhiyun 			}
3392*4882a593Smuzhiyun 
3393*4882a593Smuzhiyun 			ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3394*4882a593Smuzhiyun 			kfree(p);
3395*4882a593Smuzhiyun 			if (ret < 0)
3396*4882a593Smuzhiyun 				goto out;
3397*4882a593Smuzhiyun 			break;
3398*4882a593Smuzhiyun 
3399*4882a593Smuzhiyun 		case SRP_OPT_PKEY:
3400*4882a593Smuzhiyun 			if (match_hex(args, &token)) {
3401*4882a593Smuzhiyun 				pr_warn("bad P_Key parameter '%s'\n", p);
				ret = -EINVAL;
3402*4882a593Smuzhiyun 				goto out;
3403*4882a593Smuzhiyun 			}
3404*4882a593Smuzhiyun 			target->ib_cm.pkey = cpu_to_be16(token);
3405*4882a593Smuzhiyun 			break;
3406*4882a593Smuzhiyun 
3407*4882a593Smuzhiyun 		case SRP_OPT_SERVICE_ID:
3408*4882a593Smuzhiyun 			p = match_strdup(args);
3409*4882a593Smuzhiyun 			if (!p) {
3410*4882a593Smuzhiyun 				ret = -ENOMEM;
3411*4882a593Smuzhiyun 				goto out;
3412*4882a593Smuzhiyun 			}
3413*4882a593Smuzhiyun 			ret = kstrtoull(p, 16, &ull);
3414*4882a593Smuzhiyun 			if (ret) {
3415*4882a593Smuzhiyun 				pr_warn("bad service_id parameter '%s'\n", p);
3416*4882a593Smuzhiyun 				kfree(p);
3417*4882a593Smuzhiyun 				goto out;
3418*4882a593Smuzhiyun 			}
3419*4882a593Smuzhiyun 			target->ib_cm.service_id = cpu_to_be64(ull);
3420*4882a593Smuzhiyun 			kfree(p);
3421*4882a593Smuzhiyun 			break;
3422*4882a593Smuzhiyun 
3423*4882a593Smuzhiyun 		case SRP_OPT_IP_SRC:
3424*4882a593Smuzhiyun 			p = match_strdup(args);
3425*4882a593Smuzhiyun 			if (!p) {
3426*4882a593Smuzhiyun 				ret = -ENOMEM;
3427*4882a593Smuzhiyun 				goto out;
3428*4882a593Smuzhiyun 			}
3429*4882a593Smuzhiyun 			ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3430*4882a593Smuzhiyun 					   NULL);
3431*4882a593Smuzhiyun 			if (ret < 0) {
3432*4882a593Smuzhiyun 				pr_warn("bad source parameter '%s'\n", p);
3433*4882a593Smuzhiyun 				kfree(p);
3434*4882a593Smuzhiyun 				goto out;
3435*4882a593Smuzhiyun 			}
3436*4882a593Smuzhiyun 			target->rdma_cm.src_specified = true;
3437*4882a593Smuzhiyun 			kfree(p);
3438*4882a593Smuzhiyun 			break;
3439*4882a593Smuzhiyun 
3440*4882a593Smuzhiyun 		case SRP_OPT_IP_DEST:
3441*4882a593Smuzhiyun 			p = match_strdup(args);
3442*4882a593Smuzhiyun 			if (!p) {
3443*4882a593Smuzhiyun 				ret = -ENOMEM;
3444*4882a593Smuzhiyun 				goto out;
3445*4882a593Smuzhiyun 			}
3446*4882a593Smuzhiyun 			ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3447*4882a593Smuzhiyun 					   &has_port);
3448*4882a593Smuzhiyun 			if (!has_port)
3449*4882a593Smuzhiyun 				ret = -EINVAL;
3450*4882a593Smuzhiyun 			if (ret < 0) {
3451*4882a593Smuzhiyun 				pr_warn("bad dest parameter '%s'\n", p);
3452*4882a593Smuzhiyun 				kfree(p);
3453*4882a593Smuzhiyun 				goto out;
3454*4882a593Smuzhiyun 			}
3455*4882a593Smuzhiyun 			target->using_rdma_cm = true;
3456*4882a593Smuzhiyun 			kfree(p);
3457*4882a593Smuzhiyun 			break;
3458*4882a593Smuzhiyun 
3459*4882a593Smuzhiyun 		case SRP_OPT_MAX_SECT:
3460*4882a593Smuzhiyun 			if (match_int(args, &token)) {
3461*4882a593Smuzhiyun 				pr_warn("bad max sect parameter '%s'\n", p);
				ret = -EINVAL;
3462*4882a593Smuzhiyun 				goto out;
3463*4882a593Smuzhiyun 			}
3464*4882a593Smuzhiyun 			target->scsi_host->max_sectors = token;
3465*4882a593Smuzhiyun 			break;
3466*4882a593Smuzhiyun 
3467*4882a593Smuzhiyun 		case SRP_OPT_QUEUE_SIZE:
3468*4882a593Smuzhiyun 			if (match_int(args, &token) || token < 1) {
3469*4882a593Smuzhiyun 				pr_warn("bad queue_size parameter '%s'\n", p);
				ret = -EINVAL;
3470*4882a593Smuzhiyun 				goto out;
3471*4882a593Smuzhiyun 			}
3472*4882a593Smuzhiyun 			target->scsi_host->can_queue = token;
3473*4882a593Smuzhiyun 			target->queue_size = token + SRP_RSP_SQ_SIZE +
3474*4882a593Smuzhiyun 					     SRP_TSK_MGMT_SQ_SIZE;
3475*4882a593Smuzhiyun 			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3476*4882a593Smuzhiyun 				target->scsi_host->cmd_per_lun = token;
3477*4882a593Smuzhiyun 			break;
3478*4882a593Smuzhiyun 
3479*4882a593Smuzhiyun 		case SRP_OPT_MAX_CMD_PER_LUN:
3480*4882a593Smuzhiyun 			if (match_int(args, &token) || token < 1) {
3481*4882a593Smuzhiyun 				pr_warn("bad max cmd_per_lun parameter '%s'\n",
3482*4882a593Smuzhiyun 					p);
				ret = -EINVAL;
3483*4882a593Smuzhiyun 				goto out;
3484*4882a593Smuzhiyun 			}
3485*4882a593Smuzhiyun 			target->scsi_host->cmd_per_lun = token;
3486*4882a593Smuzhiyun 			break;
3487*4882a593Smuzhiyun 
3488*4882a593Smuzhiyun 		case SRP_OPT_TARGET_CAN_QUEUE:
3489*4882a593Smuzhiyun 			if (match_int(args, &token) || token < 1) {
3490*4882a593Smuzhiyun 				pr_warn("bad max target_can_queue parameter '%s'\n",
3491*4882a593Smuzhiyun 					p);
				ret = -EINVAL;
3492*4882a593Smuzhiyun 				goto out;
3493*4882a593Smuzhiyun 			}
3494*4882a593Smuzhiyun 			target->target_can_queue = token;
3495*4882a593Smuzhiyun 			break;
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun 		case SRP_OPT_IO_CLASS:
3498*4882a593Smuzhiyun 			if (match_hex(args, &token)) {
3499*4882a593Smuzhiyun 				pr_warn("bad IO class parameter '%s'\n", p);
				ret = -EINVAL;
3500*4882a593Smuzhiyun 				goto out;
3501*4882a593Smuzhiyun 			}
3502*4882a593Smuzhiyun 			if (token != SRP_REV10_IB_IO_CLASS &&
3503*4882a593Smuzhiyun 			    token != SRP_REV16A_IB_IO_CLASS) {
3504*4882a593Smuzhiyun 				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3505*4882a593Smuzhiyun 					token, SRP_REV10_IB_IO_CLASS,
3506*4882a593Smuzhiyun 					SRP_REV16A_IB_IO_CLASS);
				ret = -EINVAL;
3507*4882a593Smuzhiyun 				goto out;
3508*4882a593Smuzhiyun 			}
3509*4882a593Smuzhiyun 			target->io_class = token;
3510*4882a593Smuzhiyun 			break;
3511*4882a593Smuzhiyun 
3512*4882a593Smuzhiyun 		case SRP_OPT_INITIATOR_EXT:
3513*4882a593Smuzhiyun 			p = match_strdup(args);
3514*4882a593Smuzhiyun 			if (!p) {
3515*4882a593Smuzhiyun 				ret = -ENOMEM;
3516*4882a593Smuzhiyun 				goto out;
3517*4882a593Smuzhiyun 			}
3518*4882a593Smuzhiyun 			ret = kstrtoull(p, 16, &ull);
3519*4882a593Smuzhiyun 			if (ret) {
3520*4882a593Smuzhiyun 				pr_warn("bad initiator_ext value '%s'\n", p);
3521*4882a593Smuzhiyun 				kfree(p);
3522*4882a593Smuzhiyun 				goto out;
3523*4882a593Smuzhiyun 			}
3524*4882a593Smuzhiyun 			target->initiator_ext = cpu_to_be64(ull);
3525*4882a593Smuzhiyun 			kfree(p);
3526*4882a593Smuzhiyun 			break;
3527*4882a593Smuzhiyun 
3528*4882a593Smuzhiyun 		case SRP_OPT_CMD_SG_ENTRIES:
3529*4882a593Smuzhiyun 			if (match_int(args, &token) || token < 1 || token > 255) {
3530*4882a593Smuzhiyun 				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3531*4882a593Smuzhiyun 					p);
				ret = -EINVAL;
3532*4882a593Smuzhiyun 				goto out;
3533*4882a593Smuzhiyun 			}
3534*4882a593Smuzhiyun 			target->cmd_sg_cnt = token;
3535*4882a593Smuzhiyun 			break;
3536*4882a593Smuzhiyun 
3537*4882a593Smuzhiyun 		case SRP_OPT_ALLOW_EXT_SG:
3538*4882a593Smuzhiyun 			if (match_int(args, &token)) {
3539*4882a593Smuzhiyun 				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				ret = -EINVAL;
3540*4882a593Smuzhiyun 				goto out;
3541*4882a593Smuzhiyun 			}
3542*4882a593Smuzhiyun 			target->allow_ext_sg = !!token;
3543*4882a593Smuzhiyun 			break;
3544*4882a593Smuzhiyun 
3545*4882a593Smuzhiyun 		case SRP_OPT_SG_TABLESIZE:
3546*4882a593Smuzhiyun 			if (match_int(args, &token) || token < 1 ||
3547*4882a593Smuzhiyun 					token > SG_MAX_SEGMENTS) {
3548*4882a593Smuzhiyun 				pr_warn("bad max sg_tablesize parameter '%s'\n",
3549*4882a593Smuzhiyun 					p);
				ret = -EINVAL;
3550*4882a593Smuzhiyun 				goto out;
3551*4882a593Smuzhiyun 			}
3552*4882a593Smuzhiyun 			target->sg_tablesize = token;
3553*4882a593Smuzhiyun 			break;
3554*4882a593Smuzhiyun 
3555*4882a593Smuzhiyun 		case SRP_OPT_COMP_VECTOR:
3556*4882a593Smuzhiyun 			if (match_int(args, &token) || token < 0) {
3557*4882a593Smuzhiyun 				pr_warn("bad comp_vector parameter '%s'\n", p);
				ret = -EINVAL;
3558*4882a593Smuzhiyun 				goto out;
3559*4882a593Smuzhiyun 			}
3560*4882a593Smuzhiyun 			target->comp_vector = token;
3561*4882a593Smuzhiyun 			break;
3562*4882a593Smuzhiyun 
3563*4882a593Smuzhiyun 		case SRP_OPT_TL_RETRY_COUNT:
3564*4882a593Smuzhiyun 			if (match_int(args, &token) || token < 2 || token > 7) {
3565*4882a593Smuzhiyun 				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3566*4882a593Smuzhiyun 					p);
				ret = -EINVAL;
3567*4882a593Smuzhiyun 				goto out;
3568*4882a593Smuzhiyun 			}
3569*4882a593Smuzhiyun 			target->tl_retry_count = token;
3570*4882a593Smuzhiyun 			break;
3571*4882a593Smuzhiyun 
3572*4882a593Smuzhiyun 		case SRP_OPT_MAX_IT_IU_SIZE:
3573*4882a593Smuzhiyun 			if (match_int(args, &token) || token < 0) {
3574*4882a593Smuzhiyun 				pr_warn("bad maximum initiator to target IU size '%s'\n", p);
				ret = -EINVAL;
3575*4882a593Smuzhiyun 				goto out;
3576*4882a593Smuzhiyun 			}
3577*4882a593Smuzhiyun 			target->max_it_iu_size = token;
3578*4882a593Smuzhiyun 			break;
3579*4882a593Smuzhiyun 
3580*4882a593Smuzhiyun 		case SRP_OPT_CH_COUNT:
3581*4882a593Smuzhiyun 			if (match_int(args, &token) || token < 1) {
3582*4882a593Smuzhiyun 				pr_warn("bad channel count '%s'\n", p);
				ret = -EINVAL;
3583*4882a593Smuzhiyun 				goto out;
3584*4882a593Smuzhiyun 			}
3585*4882a593Smuzhiyun 			target->ch_count = token;
3586*4882a593Smuzhiyun 			break;
3587*4882a593Smuzhiyun 
3588*4882a593Smuzhiyun 		default:
3589*4882a593Smuzhiyun 			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3590*4882a593Smuzhiyun 				p);
			ret = -EINVAL;
3591*4882a593Smuzhiyun 			goto out;
3592*4882a593Smuzhiyun 		}
3593*4882a593Smuzhiyun 	}
3594*4882a593Smuzhiyun 
3595*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3596*4882a593Smuzhiyun 		if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3597*4882a593Smuzhiyun 			ret = 0;
3598*4882a593Smuzhiyun 			break;
3599*4882a593Smuzhiyun 		}
3600*4882a593Smuzhiyun 	}
3601*4882a593Smuzhiyun 	if (ret)
3602*4882a593Smuzhiyun 		pr_warn("target creation request is missing one or more parameters\n");
3603*4882a593Smuzhiyun 
3604*4882a593Smuzhiyun 	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3605*4882a593Smuzhiyun 	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3606*4882a593Smuzhiyun 		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3607*4882a593Smuzhiyun 			target->scsi_host->cmd_per_lun,
3608*4882a593Smuzhiyun 			target->scsi_host->can_queue);
3609*4882a593Smuzhiyun 
3610*4882a593Smuzhiyun out:
3611*4882a593Smuzhiyun 	kfree(options);
3612*4882a593Smuzhiyun 	return ret;
3613*4882a593Smuzhiyun }
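
/*
 * Example of the mandatory-set check above (values illustrative): a
 * request consisting only of "max_sect=4096" never sets the
 * SRP_OPT_ID_EXT or SRP_OPT_IOC_GUID bits, so neither entry of
 * srp_opt_mandatory[] is fully covered and srp_parse_options() warns
 * and returns -EINVAL.
 */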
3614*4882a593Smuzhiyun 
3615*4882a593Smuzhiyun static ssize_t srp_create_target(struct device *dev,
3616*4882a593Smuzhiyun 				 struct device_attribute *attr,
3617*4882a593Smuzhiyun 				 const char *buf, size_t count)
3618*4882a593Smuzhiyun {
3619*4882a593Smuzhiyun 	struct srp_host *host =
3620*4882a593Smuzhiyun 		container_of(dev, struct srp_host, dev);
3621*4882a593Smuzhiyun 	struct Scsi_Host *target_host;
3622*4882a593Smuzhiyun 	struct srp_target_port *target;
3623*4882a593Smuzhiyun 	struct srp_rdma_ch *ch;
3624*4882a593Smuzhiyun 	struct srp_device *srp_dev = host->srp_dev;
3625*4882a593Smuzhiyun 	struct ib_device *ibdev = srp_dev->dev;
3626*4882a593Smuzhiyun 	int ret, i, ch_idx;
3627*4882a593Smuzhiyun 	unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3628*4882a593Smuzhiyun 	bool multich = false;
3629*4882a593Smuzhiyun 	uint32_t max_iu_len;
3630*4882a593Smuzhiyun 
3631*4882a593Smuzhiyun 	target_host = scsi_host_alloc(&srp_template,
3632*4882a593Smuzhiyun 				      sizeof (struct srp_target_port));
3633*4882a593Smuzhiyun 	if (!target_host)
3634*4882a593Smuzhiyun 		return -ENOMEM;
3635*4882a593Smuzhiyun 
3636*4882a593Smuzhiyun 	target_host->transportt  = ib_srp_transport_template;
3637*4882a593Smuzhiyun 	target_host->max_channel = 0;
3638*4882a593Smuzhiyun 	target_host->max_id      = 1;
3639*4882a593Smuzhiyun 	target_host->max_lun     = -1LL;
3640*4882a593Smuzhiyun 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3641*4882a593Smuzhiyun 	target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3642*4882a593Smuzhiyun 
3643*4882a593Smuzhiyun 	if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
3644*4882a593Smuzhiyun 		target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3645*4882a593Smuzhiyun 
3646*4882a593Smuzhiyun 	target = host_to_target(target_host);
3647*4882a593Smuzhiyun 
3648*4882a593Smuzhiyun 	target->net		= kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3649*4882a593Smuzhiyun 	target->io_class	= SRP_REV16A_IB_IO_CLASS;
3650*4882a593Smuzhiyun 	target->scsi_host	= target_host;
3651*4882a593Smuzhiyun 	target->srp_host	= host;
3652*4882a593Smuzhiyun 	target->lkey		= host->srp_dev->pd->local_dma_lkey;
3653*4882a593Smuzhiyun 	target->global_rkey	= host->srp_dev->global_rkey;
3654*4882a593Smuzhiyun 	target->cmd_sg_cnt	= cmd_sg_entries;
3655*4882a593Smuzhiyun 	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
3656*4882a593Smuzhiyun 	target->allow_ext_sg	= allow_ext_sg;
3657*4882a593Smuzhiyun 	target->tl_retry_count	= 7;
3658*4882a593Smuzhiyun 	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;
3659*4882a593Smuzhiyun 
3660*4882a593Smuzhiyun 	/*
3661*4882a593Smuzhiyun 	 * Prevent the SCSI host from being removed by srp_remove_target()
3662*4882a593Smuzhiyun 	 * before this function returns.
3663*4882a593Smuzhiyun 	 */
3664*4882a593Smuzhiyun 	scsi_host_get(target->scsi_host);
3665*4882a593Smuzhiyun 
3666*4882a593Smuzhiyun 	ret = mutex_lock_interruptible(&host->add_target_mutex);
3667*4882a593Smuzhiyun 	if (ret < 0)
3668*4882a593Smuzhiyun 		goto put;
3669*4882a593Smuzhiyun 
3670*4882a593Smuzhiyun 	ret = srp_parse_options(target->net, buf, target);
3671*4882a593Smuzhiyun 	if (ret)
3672*4882a593Smuzhiyun 		goto out;
3673*4882a593Smuzhiyun 
3674*4882a593Smuzhiyun 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3675*4882a593Smuzhiyun 
3676*4882a593Smuzhiyun 	if (!srp_conn_unique(target->srp_host, target)) {
3677*4882a593Smuzhiyun 		if (target->using_rdma_cm) {
3678*4882a593Smuzhiyun 			shost_printk(KERN_INFO, target->scsi_host,
3679*4882a593Smuzhiyun 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3680*4882a593Smuzhiyun 				     be64_to_cpu(target->id_ext),
3681*4882a593Smuzhiyun 				     be64_to_cpu(target->ioc_guid),
3682*4882a593Smuzhiyun 				     &target->rdma_cm.dst);
3683*4882a593Smuzhiyun 		} else {
3684*4882a593Smuzhiyun 			shost_printk(KERN_INFO, target->scsi_host,
3685*4882a593Smuzhiyun 				     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3686*4882a593Smuzhiyun 				     be64_to_cpu(target->id_ext),
3687*4882a593Smuzhiyun 				     be64_to_cpu(target->ioc_guid),
3688*4882a593Smuzhiyun 				     be64_to_cpu(target->initiator_ext));
3689*4882a593Smuzhiyun 		}
3690*4882a593Smuzhiyun 		ret = -EEXIST;
3691*4882a593Smuzhiyun 		goto out;
3692*4882a593Smuzhiyun 	}
3693*4882a593Smuzhiyun 
3694*4882a593Smuzhiyun 	if (!srp_dev->has_fr && !target->allow_ext_sg &&
3695*4882a593Smuzhiyun 	    target->cmd_sg_cnt < target->sg_tablesize) {
3696*4882a593Smuzhiyun 		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3697*4882a593Smuzhiyun 		target->sg_tablesize = target->cmd_sg_cnt;
3698*4882a593Smuzhiyun 	}
3699*4882a593Smuzhiyun 
3700*4882a593Smuzhiyun 	if (srp_dev->use_fast_reg) {
3701*4882a593Smuzhiyun 		bool gaps_reg = (ibdev->attrs.device_cap_flags &
3702*4882a593Smuzhiyun 				 IB_DEVICE_SG_GAPS_REG);
3703*4882a593Smuzhiyun 
3704*4882a593Smuzhiyun 		max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3705*4882a593Smuzhiyun 				  (ilog2(srp_dev->mr_page_size) - 9);
3706*4882a593Smuzhiyun 		if (!gaps_reg) {
3707*4882a593Smuzhiyun 			/*
3708*4882a593Smuzhiyun 			 * FR can only map one HCA page per entry. If the start
3709*4882a593Smuzhiyun 			 * address is not aligned on a HCA page boundary two
3710*4882a593Smuzhiyun 			 * entries will be used for the head and the tail
3711*4882a593Smuzhiyun 			 * although these two entries combined contain at most
3712*4882a593Smuzhiyun 			 * one HCA page of data. Hence the "+ 1" in the
3713*4882a593Smuzhiyun 			 * calculation below.
3714*4882a593Smuzhiyun 			 *
3715*4882a593Smuzhiyun 			 * The indirect data buffer descriptor is contiguous
3716*4882a593Smuzhiyun 			 * so the memory for that buffer will only be
3717*4882a593Smuzhiyun 			 * registered if register_always is true. Hence add
3718*4882a593Smuzhiyun 			 * one to mr_per_cmd if register_always has been set.
3719*4882a593Smuzhiyun 			 */
3720*4882a593Smuzhiyun 			mr_per_cmd = register_always +
3721*4882a593Smuzhiyun 				(target->scsi_host->max_sectors + 1 +
3722*4882a593Smuzhiyun 				 max_sectors_per_mr - 1) / max_sectors_per_mr;
3723*4882a593Smuzhiyun 		} else {
3724*4882a593Smuzhiyun 			mr_per_cmd = register_always +
3725*4882a593Smuzhiyun 				(target->sg_tablesize +
3726*4882a593Smuzhiyun 				 srp_dev->max_pages_per_mr - 1) /
3727*4882a593Smuzhiyun 				srp_dev->max_pages_per_mr;
3728*4882a593Smuzhiyun 		}
3729*4882a593Smuzhiyun 		pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3730*4882a593Smuzhiyun 			 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3731*4882a593Smuzhiyun 			 max_sectors_per_mr, mr_per_cmd);
3732*4882a593Smuzhiyun 	}
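
	/*
	 * Worked example for the calculation above (HCA limits are
	 * illustrative): with mr_page_size = 4096 and max_pages_per_mr =
	 * 256, max_sectors_per_mr = 256 << (12 - 9) = 2048 sectors (1 MiB).
	 * For max_sectors = 1024 and register_always = 1, the !gaps_reg
	 * branch yields mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.
	 */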
3733*4882a593Smuzhiyun 
3734*4882a593Smuzhiyun 	target_host->sg_tablesize = target->sg_tablesize;
3735*4882a593Smuzhiyun 	target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3736*4882a593Smuzhiyun 	target->mr_per_cmd = mr_per_cmd;
3737*4882a593Smuzhiyun 	target->indirect_size = target->sg_tablesize *
3738*4882a593Smuzhiyun 				sizeof (struct srp_direct_buf);
3739*4882a593Smuzhiyun 	max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3740*4882a593Smuzhiyun 				       srp_use_imm_data,
3741*4882a593Smuzhiyun 				       target->max_it_iu_size);
3742*4882a593Smuzhiyun 
3743*4882a593Smuzhiyun 	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3744*4882a593Smuzhiyun 	INIT_WORK(&target->remove_work, srp_remove_work);
3745*4882a593Smuzhiyun 	spin_lock_init(&target->lock);
3746*4882a593Smuzhiyun 	ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3747*4882a593Smuzhiyun 	if (ret)
3748*4882a593Smuzhiyun 		goto out;
3749*4882a593Smuzhiyun 
3750*4882a593Smuzhiyun 	ret = -ENOMEM;
3751*4882a593Smuzhiyun 	if (target->ch_count == 0) {
3752*4882a593Smuzhiyun 		target->ch_count =
3753*4882a593Smuzhiyun 			min(ch_count ?:
3754*4882a593Smuzhiyun 				max(4 * num_online_nodes(),
3755*4882a593Smuzhiyun 				    ibdev->num_comp_vectors),
3756*4882a593Smuzhiyun 				num_online_cpus());
3757*4882a593Smuzhiyun 	}
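
	/*
	 * Example (topology values illustrative): with the ch_count module
	 * parameter left at 0, two online NUMA nodes, 16 completion vectors
	 * and 12 online CPUs, the default above works out to
	 * min(max(4 * 2, 16), 12) = 12 channels.
	 */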
3758*4882a593Smuzhiyun 
3759*4882a593Smuzhiyun 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3760*4882a593Smuzhiyun 			     GFP_KERNEL);
3761*4882a593Smuzhiyun 	if (!target->ch)
3762*4882a593Smuzhiyun 		goto out;
3763*4882a593Smuzhiyun 
3764*4882a593Smuzhiyun 	for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
3765*4882a593Smuzhiyun 		ch = &target->ch[ch_idx];
3766*4882a593Smuzhiyun 		ch->target = target;
3767*4882a593Smuzhiyun 		ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3768*4882a593Smuzhiyun 		spin_lock_init(&ch->lock);
3769*4882a593Smuzhiyun 		INIT_LIST_HEAD(&ch->free_tx);
3770*4882a593Smuzhiyun 		ret = srp_new_cm_id(ch);
3771*4882a593Smuzhiyun 		if (ret)
3772*4882a593Smuzhiyun 			goto err_disconnect;
3773*4882a593Smuzhiyun 
3774*4882a593Smuzhiyun 		ret = srp_create_ch_ib(ch);
3775*4882a593Smuzhiyun 		if (ret)
3776*4882a593Smuzhiyun 			goto err_disconnect;
3777*4882a593Smuzhiyun 
3778*4882a593Smuzhiyun 		ret = srp_alloc_req_data(ch);
3779*4882a593Smuzhiyun 		if (ret)
3780*4882a593Smuzhiyun 			goto err_disconnect;
3781*4882a593Smuzhiyun 
3782*4882a593Smuzhiyun 		ret = srp_connect_ch(ch, max_iu_len, multich);
3783*4882a593Smuzhiyun 		if (ret) {
3784*4882a593Smuzhiyun 			char dst[64];
3785*4882a593Smuzhiyun 
3786*4882a593Smuzhiyun 			if (target->using_rdma_cm)
3787*4882a593Smuzhiyun 				snprintf(dst, sizeof(dst), "%pIS",
3788*4882a593Smuzhiyun 					&target->rdma_cm.dst);
3789*4882a593Smuzhiyun 			else
3790*4882a593Smuzhiyun 				snprintf(dst, sizeof(dst), "%pI6",
3791*4882a593Smuzhiyun 					target->ib_cm.orig_dgid.raw);
3792*4882a593Smuzhiyun 			shost_printk(KERN_ERR, target->scsi_host,
3793*4882a593Smuzhiyun 				PFX "Connection %d/%d to %s failed\n",
3794*4882a593Smuzhiyun 				ch_idx,
3795*4882a593Smuzhiyun 				target->ch_count, dst);
3796*4882a593Smuzhiyun 			if (ch_idx == 0) {
3797*4882a593Smuzhiyun 				goto free_ch;
3798*4882a593Smuzhiyun 			} else {
3799*4882a593Smuzhiyun 				srp_free_ch_ib(target, ch);
3800*4882a593Smuzhiyun 				srp_free_req_data(target, ch);
3801*4882a593Smuzhiyun 				target->ch_count = ch - target->ch;
3802*4882a593Smuzhiyun 				goto connected;
3803*4882a593Smuzhiyun 			}
3804*4882a593Smuzhiyun 		}
3805*4882a593Smuzhiyun 		multich = true;
3806*4882a593Smuzhiyun 	}
3807*4882a593Smuzhiyun 
3808*4882a593Smuzhiyun connected:
3809*4882a593Smuzhiyun 	target->scsi_host->nr_hw_queues = target->ch_count;
3810*4882a593Smuzhiyun 
3811*4882a593Smuzhiyun 	ret = srp_add_target(host, target);
3812*4882a593Smuzhiyun 	if (ret)
3813*4882a593Smuzhiyun 		goto err_disconnect;
3814*4882a593Smuzhiyun 
3815*4882a593Smuzhiyun 	if (target->state != SRP_TARGET_REMOVED) {
3816*4882a593Smuzhiyun 		if (target->using_rdma_cm) {
3817*4882a593Smuzhiyun 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3818*4882a593Smuzhiyun 				     "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3819*4882a593Smuzhiyun 				     be64_to_cpu(target->id_ext),
3820*4882a593Smuzhiyun 				     be64_to_cpu(target->ioc_guid),
3821*4882a593Smuzhiyun 				     target->sgid.raw, &target->rdma_cm.dst);
3822*4882a593Smuzhiyun 		} else {
3823*4882a593Smuzhiyun 			shost_printk(KERN_DEBUG, target->scsi_host, PFX
3824*4882a593Smuzhiyun 				     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3825*4882a593Smuzhiyun 				     be64_to_cpu(target->id_ext),
3826*4882a593Smuzhiyun 				     be64_to_cpu(target->ioc_guid),
3827*4882a593Smuzhiyun 				     be16_to_cpu(target->ib_cm.pkey),
3828*4882a593Smuzhiyun 				     be64_to_cpu(target->ib_cm.service_id),
3829*4882a593Smuzhiyun 				     target->sgid.raw,
3830*4882a593Smuzhiyun 				     target->ib_cm.orig_dgid.raw);
3831*4882a593Smuzhiyun 		}
3832*4882a593Smuzhiyun 	}
3833*4882a593Smuzhiyun 
3834*4882a593Smuzhiyun 	ret = count;
3835*4882a593Smuzhiyun 
3836*4882a593Smuzhiyun out:
3837*4882a593Smuzhiyun 	mutex_unlock(&host->add_target_mutex);
3838*4882a593Smuzhiyun 
3839*4882a593Smuzhiyun put:
3840*4882a593Smuzhiyun 	scsi_host_put(target->scsi_host);
3841*4882a593Smuzhiyun 	if (ret < 0) {
3842*4882a593Smuzhiyun 		/*
3843*4882a593Smuzhiyun 		 * If a call to srp_remove_target() has not been scheduled,
3844*4882a593Smuzhiyun 		 * drop the network namespace reference that was obtained
3845*4882a593Smuzhiyun 		 * earlier in this function.
3846*4882a593Smuzhiyun 		 */
3847*4882a593Smuzhiyun 		if (target->state != SRP_TARGET_REMOVED)
3848*4882a593Smuzhiyun 			kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3849*4882a593Smuzhiyun 		scsi_host_put(target->scsi_host);
3850*4882a593Smuzhiyun 	}
3851*4882a593Smuzhiyun 
3852*4882a593Smuzhiyun 	return ret;
3853*4882a593Smuzhiyun 
3854*4882a593Smuzhiyun err_disconnect:
3855*4882a593Smuzhiyun 	srp_disconnect_target(target);
3856*4882a593Smuzhiyun 
3857*4882a593Smuzhiyun free_ch:
3858*4882a593Smuzhiyun 	for (i = 0; i < target->ch_count; i++) {
3859*4882a593Smuzhiyun 		ch = &target->ch[i];
3860*4882a593Smuzhiyun 		srp_free_ch_ib(target, ch);
3861*4882a593Smuzhiyun 		srp_free_req_data(target, ch);
3862*4882a593Smuzhiyun 	}
3863*4882a593Smuzhiyun 
3864*4882a593Smuzhiyun 	kfree(target->ch);
3865*4882a593Smuzhiyun 	goto out;
3866*4882a593Smuzhiyun }
3867*4882a593Smuzhiyun 
3868*4882a593Smuzhiyun static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3869*4882a593Smuzhiyun 
3870*4882a593Smuzhiyun static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3871*4882a593Smuzhiyun 			  char *buf)
3872*4882a593Smuzhiyun {
3873*4882a593Smuzhiyun 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3874*4882a593Smuzhiyun 
3875*4882a593Smuzhiyun 	return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3876*4882a593Smuzhiyun }
3877*4882a593Smuzhiyun 
3878*4882a593Smuzhiyun static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3879*4882a593Smuzhiyun 
3880*4882a593Smuzhiyun static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3881*4882a593Smuzhiyun 			 char *buf)
3882*4882a593Smuzhiyun {
3883*4882a593Smuzhiyun 	struct srp_host *host = container_of(dev, struct srp_host, dev);
3884*4882a593Smuzhiyun 
3885*4882a593Smuzhiyun 	return sprintf(buf, "%d\n", host->port);
3886*4882a593Smuzhiyun }
3887*4882a593Smuzhiyun 
3888*4882a593Smuzhiyun static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
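
/*
 * Example (device name illustrative): for a port registered below as
 * "srp-mlx5_0-1", the attributes above appear as
 *   /sys/class/infiniband_srp/srp-mlx5_0-1/ibdev -> "mlx5_0"
 *   /sys/class/infiniband_srp/srp-mlx5_0-1/port  -> "1"
 * next to the write-only add_target attribute, which accepts the option
 * string parsed by srp_parse_options().
 */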
3889*4882a593Smuzhiyun 
3890*4882a593Smuzhiyun static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3891*4882a593Smuzhiyun {
3892*4882a593Smuzhiyun 	struct srp_host *host;
3893*4882a593Smuzhiyun 
3894*4882a593Smuzhiyun 	host = kzalloc(sizeof *host, GFP_KERNEL);
3895*4882a593Smuzhiyun 	if (!host)
3896*4882a593Smuzhiyun 		return NULL;
3897*4882a593Smuzhiyun 
3898*4882a593Smuzhiyun 	INIT_LIST_HEAD(&host->target_list);
3899*4882a593Smuzhiyun 	spin_lock_init(&host->target_lock);
3900*4882a593Smuzhiyun 	init_completion(&host->released);
3901*4882a593Smuzhiyun 	mutex_init(&host->add_target_mutex);
3902*4882a593Smuzhiyun 	host->srp_dev = device;
3903*4882a593Smuzhiyun 	host->port = port;
3904*4882a593Smuzhiyun 
3905*4882a593Smuzhiyun 	host->dev.class = &srp_class;
3906*4882a593Smuzhiyun 	host->dev.parent = device->dev->dev.parent;
3907*4882a593Smuzhiyun 	dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
3908*4882a593Smuzhiyun 		     port);
3909*4882a593Smuzhiyun 
3910*4882a593Smuzhiyun 	if (device_register(&host->dev))
3911*4882a593Smuzhiyun 		goto free_host;
3912*4882a593Smuzhiyun 	if (device_create_file(&host->dev, &dev_attr_add_target))
3913*4882a593Smuzhiyun 		goto err_class;
3914*4882a593Smuzhiyun 	if (device_create_file(&host->dev, &dev_attr_ibdev))
3915*4882a593Smuzhiyun 		goto err_class;
3916*4882a593Smuzhiyun 	if (device_create_file(&host->dev, &dev_attr_port))
3917*4882a593Smuzhiyun 		goto err_class;
3918*4882a593Smuzhiyun 
3919*4882a593Smuzhiyun 	return host;
3920*4882a593Smuzhiyun 
3921*4882a593Smuzhiyun err_class:
3922*4882a593Smuzhiyun 	device_unregister(&host->dev);
3923*4882a593Smuzhiyun 
3924*4882a593Smuzhiyun free_host:
3925*4882a593Smuzhiyun 	kfree(host);
3926*4882a593Smuzhiyun 
3927*4882a593Smuzhiyun 	return NULL;
3928*4882a593Smuzhiyun }
3929*4882a593Smuzhiyun 
3930*4882a593Smuzhiyun static void srp_rename_dev(struct ib_device *device, void *client_data)
3931*4882a593Smuzhiyun {
3932*4882a593Smuzhiyun 	struct srp_device *srp_dev = client_data;
3933*4882a593Smuzhiyun 	struct srp_host *host, *tmp_host;
3934*4882a593Smuzhiyun 
3935*4882a593Smuzhiyun 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3936*4882a593Smuzhiyun 		char name[IB_DEVICE_NAME_MAX + 8];
3937*4882a593Smuzhiyun 
3938*4882a593Smuzhiyun 		snprintf(name, sizeof(name), "srp-%s-%d",
3939*4882a593Smuzhiyun 			 dev_name(&device->dev), host->port);
3940*4882a593Smuzhiyun 		device_rename(&host->dev, name);
3941*4882a593Smuzhiyun 	}
3942*4882a593Smuzhiyun }
3943*4882a593Smuzhiyun 
3944*4882a593Smuzhiyun static int srp_add_one(struct ib_device *device)
3945*4882a593Smuzhiyun {
3946*4882a593Smuzhiyun 	struct srp_device *srp_dev;
3947*4882a593Smuzhiyun 	struct ib_device_attr *attr = &device->attrs;
3948*4882a593Smuzhiyun 	struct srp_host *host;
3949*4882a593Smuzhiyun 	int mr_page_shift;
3950*4882a593Smuzhiyun 	unsigned int p;
3951*4882a593Smuzhiyun 	u64 max_pages_per_mr;
3952*4882a593Smuzhiyun 	unsigned int flags = 0;
3953*4882a593Smuzhiyun 
3954*4882a593Smuzhiyun 	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3955*4882a593Smuzhiyun 	if (!srp_dev)
3956*4882a593Smuzhiyun 		return -ENOMEM;
3957*4882a593Smuzhiyun 
3958*4882a593Smuzhiyun 	/*
3959*4882a593Smuzhiyun 	 * Use the smallest page size supported by the HCA, down to a
3960*4882a593Smuzhiyun 	 * minimum of 4096 bytes. We're unlikely to build large sglists
3961*4882a593Smuzhiyun 	 * out of smaller entries.
3962*4882a593Smuzhiyun 	 */
3963*4882a593Smuzhiyun 	mr_page_shift		= max(12, ffs(attr->page_size_cap) - 1);
3964*4882a593Smuzhiyun 	srp_dev->mr_page_size	= 1 << mr_page_shift;
3965*4882a593Smuzhiyun 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
3966*4882a593Smuzhiyun 	max_pages_per_mr	= attr->max_mr_size;
3967*4882a593Smuzhiyun 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
3968*4882a593Smuzhiyun 	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3969*4882a593Smuzhiyun 		 attr->max_mr_size, srp_dev->mr_page_size,
3970*4882a593Smuzhiyun 		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3971*4882a593Smuzhiyun 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3972*4882a593Smuzhiyun 					  max_pages_per_mr);
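
	/*
	 * Worked example (capability values illustrative): an HCA reporting
	 * page_size_cap = 0x001ff000 (4 KiB through 1 MiB pages) gives
	 * ffs(0x001ff000) - 1 = 12, so mr_page_size = 4096 and mr_page_mask
	 * = ~4095ULL; with max_mr_size = 1ULL << 32, 2^32 / 4096 = 1048576
	 * pages fit in one MR before the clamp to SRP_MAX_PAGES_PER_MR.
	 */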
3973*4882a593Smuzhiyun 
3974*4882a593Smuzhiyun 	srp_dev->has_fr = (attr->device_cap_flags &
3975*4882a593Smuzhiyun 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
3976*4882a593Smuzhiyun 	if (!never_register && !srp_dev->has_fr)
3977*4882a593Smuzhiyun 		dev_warn(&device->dev, "FR is not supported\n");
3978*4882a593Smuzhiyun 	else if (!never_register &&
3979*4882a593Smuzhiyun 		 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
3980*4882a593Smuzhiyun 		srp_dev->use_fast_reg = srp_dev->has_fr;
3981*4882a593Smuzhiyun 
3982*4882a593Smuzhiyun 	if (never_register || !register_always || !srp_dev->has_fr)
3983*4882a593Smuzhiyun 		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3984*4882a593Smuzhiyun 
3985*4882a593Smuzhiyun 	if (srp_dev->use_fast_reg) {
3986*4882a593Smuzhiyun 		srp_dev->max_pages_per_mr =
3987*4882a593Smuzhiyun 			min_t(u32, srp_dev->max_pages_per_mr,
3988*4882a593Smuzhiyun 			      attr->max_fast_reg_page_list_len);
3989*4882a593Smuzhiyun 	}
3990*4882a593Smuzhiyun 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
3991*4882a593Smuzhiyun 				   srp_dev->max_pages_per_mr;
3992*4882a593Smuzhiyun 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3993*4882a593Smuzhiyun 		 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
3994*4882a593Smuzhiyun 		 attr->max_fast_reg_page_list_len,
3995*4882a593Smuzhiyun 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3996*4882a593Smuzhiyun 
3997*4882a593Smuzhiyun 	INIT_LIST_HEAD(&srp_dev->dev_list);
3998*4882a593Smuzhiyun 
3999*4882a593Smuzhiyun 	srp_dev->dev = device;
4000*4882a593Smuzhiyun 	srp_dev->pd  = ib_alloc_pd(device, flags);
4001*4882a593Smuzhiyun 	if (IS_ERR(srp_dev->pd)) {
4002*4882a593Smuzhiyun 		int ret = PTR_ERR(srp_dev->pd);
4003*4882a593Smuzhiyun 
4004*4882a593Smuzhiyun 		kfree(srp_dev);
4005*4882a593Smuzhiyun 		return ret;
4006*4882a593Smuzhiyun 	}
4007*4882a593Smuzhiyun 
4008*4882a593Smuzhiyun 	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4009*4882a593Smuzhiyun 		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4010*4882a593Smuzhiyun 		WARN_ON_ONCE(srp_dev->global_rkey == 0);
4011*4882a593Smuzhiyun 	}
4012*4882a593Smuzhiyun 
4013*4882a593Smuzhiyun 	rdma_for_each_port (device, p) {
4014*4882a593Smuzhiyun 		host = srp_add_port(srp_dev, p);
4015*4882a593Smuzhiyun 		if (host)
4016*4882a593Smuzhiyun 			list_add_tail(&host->list, &srp_dev->dev_list);
4017*4882a593Smuzhiyun 	}
4018*4882a593Smuzhiyun 
4019*4882a593Smuzhiyun 	ib_set_client_data(device, &srp_client, srp_dev);
4020*4882a593Smuzhiyun 	return 0;
4021*4882a593Smuzhiyun }
4022*4882a593Smuzhiyun 
4023*4882a593Smuzhiyun static void srp_remove_one(struct ib_device *device, void *client_data)
4024*4882a593Smuzhiyun {
4025*4882a593Smuzhiyun 	struct srp_device *srp_dev;
4026*4882a593Smuzhiyun 	struct srp_host *host, *tmp_host;
4027*4882a593Smuzhiyun 	struct srp_target_port *target;
4028*4882a593Smuzhiyun 
4029*4882a593Smuzhiyun 	srp_dev = client_data;
4030*4882a593Smuzhiyun 
4031*4882a593Smuzhiyun 	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4032*4882a593Smuzhiyun 		device_unregister(&host->dev);
4033*4882a593Smuzhiyun 		/*
4034*4882a593Smuzhiyun 		 * Wait for the sysfs entry to go away, so that no new
4035*4882a593Smuzhiyun 		 * target ports can be created.
4036*4882a593Smuzhiyun 		 */
4037*4882a593Smuzhiyun 		wait_for_completion(&host->released);
4038*4882a593Smuzhiyun 
4039*4882a593Smuzhiyun 		/*
4040*4882a593Smuzhiyun 		 * Remove all target ports.
4041*4882a593Smuzhiyun 		 */
4042*4882a593Smuzhiyun 		spin_lock(&host->target_lock);
4043*4882a593Smuzhiyun 		list_for_each_entry(target, &host->target_list, list)
4044*4882a593Smuzhiyun 			srp_queue_remove_work(target);
4045*4882a593Smuzhiyun 		spin_unlock(&host->target_lock);
4046*4882a593Smuzhiyun 
4047*4882a593Smuzhiyun 		/*
4048*4882a593Smuzhiyun 		 * srp_queue_remove_work() queues a call to
4049*4882a593Smuzhiyun 		 * srp_remove_target(). The latter function cancels
4050*4882a593Smuzhiyun 		 * target->tl_err_work so waiting for the remove works to
4051*4882a593Smuzhiyun 		 * finish is sufficient.
4052*4882a593Smuzhiyun 		 */
4053*4882a593Smuzhiyun 		flush_workqueue(srp_remove_wq);
4054*4882a593Smuzhiyun 
4055*4882a593Smuzhiyun 		kfree(host);
4056*4882a593Smuzhiyun 	}
4057*4882a593Smuzhiyun 
4058*4882a593Smuzhiyun 	ib_dealloc_pd(srp_dev->pd);
4059*4882a593Smuzhiyun 
4060*4882a593Smuzhiyun 	kfree(srp_dev);
4061*4882a593Smuzhiyun }
4062*4882a593Smuzhiyun 
4063*4882a593Smuzhiyun static struct srp_function_template ib_srp_transport_functions = {
4064*4882a593Smuzhiyun 	.has_rport_state	 = true,
4065*4882a593Smuzhiyun 	.reset_timer_if_blocked	 = true,
4066*4882a593Smuzhiyun 	.reconnect_delay	 = &srp_reconnect_delay,
4067*4882a593Smuzhiyun 	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
4068*4882a593Smuzhiyun 	.dev_loss_tmo		 = &srp_dev_loss_tmo,
4069*4882a593Smuzhiyun 	.reconnect		 = srp_rport_reconnect,
4070*4882a593Smuzhiyun 	.rport_delete		 = srp_rport_delete,
4071*4882a593Smuzhiyun 	.terminate_rport_io	 = srp_terminate_io,
4072*4882a593Smuzhiyun };
4073*4882a593Smuzhiyun 
4074*4882a593Smuzhiyun static int __init srp_init_module(void)
4075*4882a593Smuzhiyun {
4076*4882a593Smuzhiyun 	int ret;
4077*4882a593Smuzhiyun 
4078*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4079*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4080*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4081*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4082*4882a593Smuzhiyun 
4083*4882a593Smuzhiyun 	if (srp_sg_tablesize) {
4084*4882a593Smuzhiyun 		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4085*4882a593Smuzhiyun 		if (!cmd_sg_entries)
4086*4882a593Smuzhiyun 			cmd_sg_entries = srp_sg_tablesize;
4087*4882a593Smuzhiyun 	}
4088*4882a593Smuzhiyun 
4089*4882a593Smuzhiyun 	if (!cmd_sg_entries)
4090*4882a593Smuzhiyun 		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4091*4882a593Smuzhiyun 
4092*4882a593Smuzhiyun 	if (cmd_sg_entries > 255) {
4093*4882a593Smuzhiyun 		pr_warn("Clamping cmd_sg_entries to 255\n");
4094*4882a593Smuzhiyun 		cmd_sg_entries = 255;
4095*4882a593Smuzhiyun 	}
4096*4882a593Smuzhiyun 
4097*4882a593Smuzhiyun 	if (!indirect_sg_entries)
4098*4882a593Smuzhiyun 		indirect_sg_entries = cmd_sg_entries;
4099*4882a593Smuzhiyun 	else if (indirect_sg_entries < cmd_sg_entries) {
4100*4882a593Smuzhiyun 		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4101*4882a593Smuzhiyun 			cmd_sg_entries);
4102*4882a593Smuzhiyun 		indirect_sg_entries = cmd_sg_entries;
4103*4882a593Smuzhiyun 	}
4104*4882a593Smuzhiyun 
4105*4882a593Smuzhiyun 	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4106*4882a593Smuzhiyun 		pr_warn("Clamping indirect_sg_entries to %u\n",
4107*4882a593Smuzhiyun 			SG_MAX_SEGMENTS);
4108*4882a593Smuzhiyun 		indirect_sg_entries = SG_MAX_SEGMENTS;
4109*4882a593Smuzhiyun 	}
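
	/*
	 * Example (parameter values illustrative): loading the module with
	 * cmd_sg_entries=300 and indirect_sg_entries unset first clamps
	 * cmd_sg_entries to 255 and then sets indirect_sg_entries = 255;
	 * indirect_sg_entries=64 together with cmd_sg_entries=128 is bumped
	 * up to 128 by the checks above.
	 */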
4110*4882a593Smuzhiyun 
4111*4882a593Smuzhiyun 	srp_remove_wq = create_workqueue("srp_remove");
4112*4882a593Smuzhiyun 	if (!srp_remove_wq) {
4113*4882a593Smuzhiyun 		ret = -ENOMEM;
4114*4882a593Smuzhiyun 		goto out;
4115*4882a593Smuzhiyun 	}
4116*4882a593Smuzhiyun 
4117*4882a593Smuzhiyun 	ret = -ENOMEM;
4118*4882a593Smuzhiyun 	ib_srp_transport_template =
4119*4882a593Smuzhiyun 		srp_attach_transport(&ib_srp_transport_functions);
4120*4882a593Smuzhiyun 	if (!ib_srp_transport_template)
4121*4882a593Smuzhiyun 		goto destroy_wq;
4122*4882a593Smuzhiyun 
4123*4882a593Smuzhiyun 	ret = class_register(&srp_class);
4124*4882a593Smuzhiyun 	if (ret) {
4125*4882a593Smuzhiyun 		pr_err("couldn't register class infiniband_srp\n");
4126*4882a593Smuzhiyun 		goto release_tr;
4127*4882a593Smuzhiyun 	}
4128*4882a593Smuzhiyun 
4129*4882a593Smuzhiyun 	ib_sa_register_client(&srp_sa_client);
4130*4882a593Smuzhiyun 
4131*4882a593Smuzhiyun 	ret = ib_register_client(&srp_client);
4132*4882a593Smuzhiyun 	if (ret) {
4133*4882a593Smuzhiyun 		pr_err("couldn't register IB client\n");
4134*4882a593Smuzhiyun 		goto unreg_sa;
4135*4882a593Smuzhiyun 	}
4136*4882a593Smuzhiyun 
4137*4882a593Smuzhiyun out:
4138*4882a593Smuzhiyun 	return ret;
4139*4882a593Smuzhiyun 
4140*4882a593Smuzhiyun unreg_sa:
4141*4882a593Smuzhiyun 	ib_sa_unregister_client(&srp_sa_client);
4142*4882a593Smuzhiyun 	class_unregister(&srp_class);
4143*4882a593Smuzhiyun 
4144*4882a593Smuzhiyun release_tr:
4145*4882a593Smuzhiyun 	srp_release_transport(ib_srp_transport_template);
4146*4882a593Smuzhiyun 
4147*4882a593Smuzhiyun destroy_wq:
4148*4882a593Smuzhiyun 	destroy_workqueue(srp_remove_wq);
4149*4882a593Smuzhiyun 	goto out;
4150*4882a593Smuzhiyun }
4151*4882a593Smuzhiyun 
4152*4882a593Smuzhiyun static void __exit srp_cleanup_module(void)
4153*4882a593Smuzhiyun {
4154*4882a593Smuzhiyun 	ib_unregister_client(&srp_client);
4155*4882a593Smuzhiyun 	ib_sa_unregister_client(&srp_sa_client);
4156*4882a593Smuzhiyun 	class_unregister(&srp_class);
4157*4882a593Smuzhiyun 	srp_release_transport(ib_srp_transport_template);
4158*4882a593Smuzhiyun 	destroy_workqueue(srp_remove_wq);
4159*4882a593Smuzhiyun }
4160*4882a593Smuzhiyun 
4161*4882a593Smuzhiyun module_init(srp_init_module);
4162*4882a593Smuzhiyun module_exit(srp_cleanup_module);