xref: /OK3568_Linux_fs/kernel/drivers/infiniband/ulp/srp/ib_srp.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * This software is available to you under a choice of one of two
5*4882a593Smuzhiyun  * licenses.  You may choose to be licensed under the terms of the GNU
6*4882a593Smuzhiyun  * General Public License (GPL) Version 2, available from the file
7*4882a593Smuzhiyun  * COPYING in the main directory of this source tree, or the
8*4882a593Smuzhiyun  * OpenIB.org BSD license below:
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  *     Redistribution and use in source and binary forms, with or
11*4882a593Smuzhiyun  *     without modification, are permitted provided that the following
12*4882a593Smuzhiyun  *     conditions are met:
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  *      - Redistributions of source code must retain the above
15*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
16*4882a593Smuzhiyun  *        disclaimer.
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  *      - Redistributions in binary form must reproduce the above
19*4882a593Smuzhiyun  *        copyright notice, this list of conditions and the following
20*4882a593Smuzhiyun  *        disclaimer in the documentation and/or other materials
21*4882a593Smuzhiyun  *        provided with the distribution.
22*4882a593Smuzhiyun  *
23*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24*4882a593Smuzhiyun  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25*4882a593Smuzhiyun  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26*4882a593Smuzhiyun  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27*4882a593Smuzhiyun  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28*4882a593Smuzhiyun  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29*4882a593Smuzhiyun  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30*4882a593Smuzhiyun  * SOFTWARE.
31*4882a593Smuzhiyun  */
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #ifndef IB_SRP_H
34*4882a593Smuzhiyun #define IB_SRP_H
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #include <linux/types.h>
37*4882a593Smuzhiyun #include <linux/list.h>
38*4882a593Smuzhiyun #include <linux/mutex.h>
39*4882a593Smuzhiyun #include <linux/scatterlist.h>
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun #include <scsi/scsi_host.h>
42*4882a593Smuzhiyun #include <scsi/scsi_cmnd.h>
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun #include <rdma/ib_verbs.h>
45*4882a593Smuzhiyun #include <rdma/ib_sa.h>
46*4882a593Smuzhiyun #include <rdma/ib_cm.h>
47*4882a593Smuzhiyun #include <rdma/rdma_cm.h>
48*4882a593Smuzhiyun 
/*
 * Driver-wide constants. Sizes and timeouts are implementation choices of
 * this initiator; the redirect codes are consumed by ib_srp.c (not visible
 * in this header).
 */
enum {
	SRP_PATH_REC_TIMEOUT_MS	= 1000,	/* SA path record query timeout */
	SRP_ABORT_TIMEOUT_MS	= 5000,	/* abort request timeout */

	/* Connection redirect / stale-connection codes — consumers in ib_srp.c. */
	SRP_PORT_REDIRECT	= 1,
	SRP_DLID_REDIRECT	= 2,
	SRP_STALE_CONN		= 3,

	SRP_DEF_SG_TABLESIZE	= 12,	/* default scatter/gather table size */

	/*
	 * The send queue is shared between SRP_CMD, SRP_RSP and task
	 * management IUs; the command share is what remains of the default
	 * queue size after reserving one slot for each of the other two.
	 */
	SRP_DEFAULT_QUEUE_SIZE	= 1 << 6,
	SRP_RSP_SQ_SIZE		= 1,
	SRP_TSK_MGMT_SQ_SIZE	= 1,
	SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
				  SRP_TSK_MGMT_SQ_SIZE,

	SRP_TAG_NO_REQ		= ~0U,		/* tag value meaning "no request" */
	SRP_TAG_TSK_MGMT	= 1U << 31,	/* bit that marks task mgmt tags */

	SRP_MAX_PAGES_PER_MR	= 512,	/* upper bound on pages per memory region */

	SRP_MAX_ADD_CDB_LEN	= 16,	/* additional CDB bytes beyond struct srp_cmd */

	SRP_MAX_IMM_SGE		= 2,	/* max SGEs carrying immediate data */
	SRP_MAX_SGE		= SRP_MAX_IMM_SGE + 1,
	/*
	 * Choose the immediate data offset such that a 32 byte CDB still fits.
	 */
	SRP_IMM_DATA_OFFSET	= sizeof(struct srp_cmd) +
				  SRP_MAX_ADD_CDB_LEN +
				  sizeof(struct srp_imm_buf),
};
81*4882a593Smuzhiyun 
/* Life-cycle states of a struct srp_target_port. */
enum srp_target_state {
	SRP_TARGET_SCANNING,	/* initial state while the target is being scanned */
	SRP_TARGET_LIVE,	/* target is operational */
	SRP_TARGET_REMOVED,	/* target has been removed */
};
87*4882a593Smuzhiyun 
/* Kinds of information units (IUs) exchanged with an SRP target. */
enum srp_iu_type {
	SRP_IU_CMD,		/* SCSI command IU */
	SRP_IU_TSK_MGMT,	/* task management IU */
	SRP_IU_RSP,		/* response IU */
};
93*4882a593Smuzhiyun 
/**
 * struct srp_device - SRP initiator state shared by all ports of one RDMA
 *	device
 * @dev_list:	Node in a driver-wide device list — list head is defined in
 *		ib_srp.c (not visible in this header).
 * @dev:	The underlying RDMA device.
 * @pd:		Protection domain used for memory registration.
 * @global_rkey: rkey granting remote access — NOTE(review): confirm in
 *		ib_srp.c when this is valid vs. unused.
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FR registration request.
 * @max_pages_per_mr: Maximum number of pages in a single memory region.
 * @has_fr:	Whether the device supports fast registration (FR).
 * @use_fast_reg: Whether fast registration is actually used.
 */
struct srp_device {
	struct list_head	dev_list;
	struct ib_device       *dev;
	struct ib_pd	       *pd;
	u32			global_rkey;
	u64			mr_page_mask;
	int			mr_page_size;
	int			mr_max_size;
	int			max_pages_per_mr;
	bool			has_fr;
	bool			use_fast_reg;
};
111*4882a593Smuzhiyun 
/**
 * struct srp_host - SRP initiator state per HCA port
 * @srp_dev:	Device-wide SRP state of the HCA this port belongs to.
 * @port:	Port number on the HCA.
 * @dev:	Embedded driver-model device.
 * @target_list: List of targets reached through this port — TODO confirm
 *		element type (presumably srp_target_port.list) in ib_srp.c.
 * @target_lock: Protects @target_list — TODO confirm in ib_srp.c.
 * @released:	Completion — presumably signalled when this host is released;
 *		verify against ib_srp.c.
 * @list:	Node in a per-device host list — confirm list head location.
 * @add_target_mutex: Serializes target addition through this host.
 */
struct srp_host {
	struct srp_device      *srp_dev;
	u8			port;
	struct device		dev;
	struct list_head	target_list;
	spinlock_t		target_lock;
	struct completion	released;
	struct list_head	list;
	struct mutex		add_target_mutex;
};
122*4882a593Smuzhiyun 
/**
 * struct srp_request - state of one outstanding SRP command
 * @scmnd:	SCSI command this request belongs to.
 * @cmd:	Information unit that carries the SRP_CMD request.
 * @fr_list:	Array of fast registration descriptors used by this request.
 * @indirect_desc: Buffer for the indirect data buffer descriptor table.
 * @indirect_dma_addr: DMA address of @indirect_desc.
 * @nmdesc:	Number of memory descriptors in use for this request.
 * @reg_cqe:	Completion context for memory registration work requests.
 */
struct srp_request {
	struct scsi_cmnd       *scmnd;
	struct srp_iu	       *cmd;
	struct srp_fr_desc     **fr_list;
	struct srp_direct_buf  *indirect_desc;
	dma_addr_t		indirect_dma_addr;
	short			nmdesc;
	struct ib_cqe		reg_cqe;
};
132*4882a593Smuzhiyun 
/**
 * struct srp_rdma_ch - one RDMA channel between initiator and target
 * @free_tx: List of transmit IUs available for sending.
 * @lock: Protects the hot-path channel state — NOTE(review): exact coverage
 *   to be confirmed in ib_srp.c.
 * @req_lim: Request limit — presumably flow-control credits granted by the
 *   target; confirm semantics in ib_srp.c.
 * @target: Target port this channel belongs to.
 * @send_cq: Send completion queue.
 * @recv_cq: Receive completion queue.
 * @qp: Queue pair used by this channel.
 * @fr_pool: Pool of fast registration descriptors for this channel.
 * @comp_vector: Completion vector used by this RDMA channel.
 * @max_it_iu_len: Maximum initiator-to-target information unit length.
 * @max_ti_iu_len: Maximum target-to-initiator information unit length.
 * @max_imm_sge: Maximum number of SGEs used for immediate data.
 * @use_imm_data: Whether immediate data is used on this channel.
 * @done: Completion used together with @status to wait for asynchronous
 *   events — confirm usage (path queries / CM events) in ib_srp.c.
 * @status: Result reported through @done.
 * @ib_cm: Connection state when the IB CM is used.
 * @rdma_cm: Connection state when the RDMA CM is used.
 * @tx_ring: Transmit information unit ring.
 * @rx_ring: Receive information unit ring.
 * @req_ring: Per-request state array — indexing scheme lives in ib_srp.c.
 * @tsk_mgmt_tag: Tag of the current task management request.
 * @tsk_mgmt_done: Signalled when a task management response arrives.
 * @tsk_mgmt_status: Status of the last task management response.
 * @connected: Whether this channel is connected.
 */
struct srp_rdma_ch {
	/* These are RW in the hot path, and commonly used together */
	struct list_head	free_tx;
	spinlock_t		lock;
	s32			req_lim;

	/* These are read-only in the hot path */
	struct srp_target_port *target ____cacheline_aligned_in_smp;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_qp	       *qp;
	struct srp_fr_pool     *fr_pool;
	uint32_t		max_it_iu_len;
	uint32_t		max_ti_iu_len;
	u8			max_imm_sge;
	bool			use_imm_data;

	/* Everything above this point is used in the hot path of
	 * command processing. Try to keep them packed into cachelines.
	 */

	struct completion	done;
	int			status;

	/*
	 * Only one union member is valid, depending on which connection
	 * manager the target port uses (see srp_target_port.using_rdma_cm).
	 */
	union {
		struct ib_cm {
			struct sa_path_rec	path;
			struct ib_sa_query	*path_query;
			int			path_query_id;
			struct ib_cm_id		*cm_id;
		} ib_cm;
		struct rdma_cm {
			struct rdma_cm_id	*cm_id;
		} rdma_cm;
	};

	struct srp_iu	      **tx_ring;
	struct srp_iu	      **rx_ring;
	struct srp_request     *req_ring;
	int			comp_vector;

	u64			tsk_mgmt_tag;
	struct completion	tsk_mgmt_done;
	u8			tsk_mgmt_status;
	bool			connected;
};
185*4882a593Smuzhiyun 
/**
 * struct srp_target_port
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *   this target port.
 * @ch: Array of RDMA channels; @ch_count is the number of elements.
 * @state: Life-cycle state (see enum srp_target_state).
 * @using_rdma_cm: Selects which member of the connection-management union
 *   below is valid: ib_cm when false, rdma_cm when true.
 */
struct srp_target_port {
	/* read and written in the hot path */
	spinlock_t		lock;

	/* read only in the hot path */
	u32			global_rkey;
	struct srp_rdma_ch	*ch;
	struct net		*net;
	u32			ch_count;
	u32			lkey;
	enum srp_target_state	state;
	uint32_t		max_it_iu_size;
	unsigned int		cmd_sg_cnt;
	unsigned int		indirect_size;
	bool			allow_ext_sg;

	/* other member variables */
	union ib_gid		sgid;
	__be64			id_ext;
	__be64			ioc_guid;
	__be64			initiator_ext;
	u16			io_class;
	struct srp_host	       *srp_host;
	struct Scsi_Host       *scsi_host;
	struct srp_rport       *rport;
	char			target_name[32];
	unsigned int		scsi_id;
	unsigned int		sg_tablesize;
	unsigned int		target_can_queue;
	int			mr_pool_size;
	int			mr_per_cmd;
	int			queue_size;
	int			req_ring_size;
	int			comp_vector;
	int			tl_retry_count;

	bool			using_rdma_cm;

	/*
	 * Connection manager state: only the member selected by
	 * @using_rdma_cm is valid.
	 */
	union {
		struct {
			__be64			service_id;
			union ib_gid		orig_dgid;
			__be16			pkey;
		} ib_cm;
		struct {
			/* Local (src) and remote (dst) addresses. */
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} src;
			union {
				struct sockaddr_in	ip4;
				struct sockaddr_in6	ip6;
				struct sockaddr		sa;
				struct sockaddr_storage ss;
			} dst;
			bool src_specified;	/* whether a source address was given */
		} rdma_cm;
	};

	u32			rq_tmo_jiffies;

	int			zero_req_lim;	/* presumably a statistics counter for
						 * req_lim-exhaustion events; verify in ib_srp.c */

	/* Deferred work items; handlers live in ib_srp.c (not visible here). */
	struct work_struct	tl_err_work;
	struct work_struct	remove_work;

	struct list_head	list;		/* node in srp_host.target_list — TODO confirm */
	bool			qp_in_error;
};
262*4882a593Smuzhiyun 
/**
 * struct srp_iu - an information unit (IU) buffer
 * @list:	Node in an IU list (e.g. srp_rdma_ch.free_tx — TODO confirm).
 * @dma:	DMA address of @buf.
 * @buf:	The IU payload buffer.
 * @size:	Size of @buf in bytes.
 * @direction:	DMA data direction used when mapping @buf.
 * @num_sge:	Number of entries of @sge that are in use.
 * @sge:	Scatter/gather entries describing this IU.
 * @cqe:	Completion queue context for work requests involving this IU.
 */
struct srp_iu {
	struct list_head	list;
	u64			dma;
	void		       *buf;
	size_t			size;
	enum dma_data_direction	direction;
	u32			num_sge;
	struct ib_sge		sge[SRP_MAX_SGE];
	struct ib_cqe		cqe;
};
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun /**
275*4882a593Smuzhiyun  * struct srp_fr_desc - fast registration work request arguments
276*4882a593Smuzhiyun  * @entry: Entry in srp_fr_pool.free_list.
277*4882a593Smuzhiyun  * @mr:    Memory region.
278*4882a593Smuzhiyun  * @frpl:  Fast registration page list.
279*4882a593Smuzhiyun  */
280*4882a593Smuzhiyun struct srp_fr_desc {
281*4882a593Smuzhiyun 	struct list_head		entry;
282*4882a593Smuzhiyun 	struct ib_mr			*mr;
283*4882a593Smuzhiyun };
284*4882a593Smuzhiyun 
/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:      Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:      Protects free_list.
 * @free_list: List of free descriptors.
 * @desc:      Fast registration descriptor pool; flexible array with @size
 *             elements, allocated together with this structure.
 */
struct srp_fr_pool {
	int			size;
	int			max_page_list_len;
	spinlock_t		lock;
	struct list_head	free_list;
	struct srp_fr_desc	desc[];
};
303*4882a593Smuzhiyun 
/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @fr:		    Fast registration descriptor cursor: @fr.next is the next
 *		    descriptor to use and @fr.end points one past the last,
 *		    i.e. the usable range is [next, end).
 * @gen:	    Untyped variant of the same cursor pair, for mappings that
 *		    do not use FR descriptors — NOTE(review): confirm usage
 *		    in ib_srp.c.
 * @desc:	    Pointer to the element of the SRP buffer descriptor array
 *		    that is being filled in.
 * @pages:	    Array with DMA addresses of pages being considered for
 *		    memory registration.
 * @sg:		    Scatterlist cursor; shares storage with @pages.
 * @base_dma_addr:  DMA address of the first page that has not yet been mapped.
 * @dma_len:	    Number of bytes that will be registered with the next FR
 *                  memory registration call.
 * @total_len:	    Total number of bytes in the sg-list being mapped.
 * @npages:	    Number of page addresses in the pages[] array.
 * @nmdesc:	    Number of FR memory descriptors used for mapping.
 * @ndesc:	    Number of SRP buffer descriptors that have been filled in.
 */
struct srp_map_state {
	union {
		struct {
			struct srp_fr_desc **next;
			struct srp_fr_desc **end;
		} fr;
		struct {
			void		   **next;
			void		   **end;
		} gen;
	};
	struct srp_direct_buf  *desc;
	union {
		u64			*pages;
		struct scatterlist	*sg;
	};
	dma_addr_t		base_dma_addr;
	u32			dma_len;
	u32			total_len;
	unsigned int		npages;
	unsigned int		nmdesc;
	unsigned int		ndesc;
};
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun #endif /* IB_SRP_H */
343