/*
 * iSER transport for the Open iSCSI Initiator & iSER transport internals
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 Mike Christie
 * based on code maintained by open-iscsi@googlegroups.com
 *
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ISCSI_ISER_H__
#define __ISCSI_ISER_H__

#include <linux/types.h>
#include <linux/net.h>
#include <linux/printk.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/iser.h>

#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/mempool.h>
#include <linux/uio.h>

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define DRV_NAME	"iser"
#define PFX		DRV_NAME ": "
#define DRV_VER		"1.6"

#define iser_dbg(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 2))	\
			printk(KERN_DEBUG PFX "%s: " fmt,\
				__func__ , ## arg);	\
	} while (0)

#define iser_warn(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 0))	\
			pr_warn(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_info(fmt, arg...)				\
	do {						\
		if (unlikely(iser_debug_level > 1))	\
			pr_info(PFX "%s: " fmt,		\
				__func__ , ## arg);	\
	} while (0)

#define iser_err(fmt, arg...) \
	pr_err(PFX "%s: " fmt, __func__ , ## arg)

/* Default support is 512KB I/O size */
#define ISER_DEF_MAX_SECTORS		1024
#define ISCSI_ISER_DEF_SG_TABLESIZE \
	((ISER_DEF_MAX_SECTORS * SECTOR_SIZE) >> ilog2(SZ_4K))
/* Maximum support is 16MB I/O size */
#define ISCSI_ISER_MAX_SG_TABLESIZE	((32768 * SECTOR_SIZE) >> ilog2(SZ_4K))
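/*
 * Worked example (assuming the usual SECTOR_SIZE of 512 bytes and 4K pages):
 * the default of 1024 sectors is 1024 * 512 = 512 KB, which needs
 * 512 KB / 4 KB = 128 sg entries; the maximum of 32768 sectors is 16 MB,
 * i.e. 4096 sg entries.
 */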

#define ISER_DEF_XMIT_CMDS_DEFAULT		512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
	#define ISER_DEF_XMIT_CMDS_MAX		ISCSI_DEF_XMIT_CMDS_MAX
#else
	#define ISER_DEF_XMIT_CMDS_MAX		ISER_DEF_XMIT_CMDS_DEFAULT
#endif
#define ISER_DEF_CMD_PER_LUN		ISER_DEF_XMIT_CMDS_MAX

/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
#define ISER_MAX_RX_MISC_PDUS		4 /* NOOP_IN(2), ASYNC_EVENT(2) */

#define ISER_MAX_TX_MISC_PDUS		6 /* NOOP_OUT(2), TEXT(1),      *
					   * SCSI_TMFUNC(2), LOGOUT(1)  */

#define ISER_QP_MAX_RECV_DTOS		(ISER_DEF_XMIT_CMDS_MAX)

#define ISER_MIN_POSTED_RX		(ISER_DEF_XMIT_CMDS_MAX >> 2)
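/*
 * With the default ISER_DEF_XMIT_CMDS_MAX of 512 this evaluates to 128.
 * It sizes the rx_wr batch array in struct ib_conn below; the assumption
 * (derived from that usage, not stated here) is that receive buffers are
 * reposted in batches of up to a quarter of the recv queue.
 */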

/* The max TX (send) WR supported by the iSER QP is defined by
 * max_send_wr = T * (1 + D) + C; D is how many inflight dataouts we expect
 * to have at most per SCSI command. The tx posting & completion handling code
 * supports an -EAGAIN scheme where tx is suspended till the QP has room for
 * more send WRs. D=8 comes from 64K/8K.
 */

#define ISER_INFLIGHT_DATAOUTS		8

#define ISER_QP_MAX_REQ_DTOS		(ISER_DEF_XMIT_CMDS_MAX *	\
					(1 + ISER_INFLIGHT_DATAOUTS) +	\
					ISER_MAX_TX_MISC_PDUS +		\
					ISER_MAX_RX_MISC_PDUS)
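/*
 * Worked example (assuming ISER_DEF_XMIT_CMDS_MAX evaluates to its default
 * of 512): 512 * (1 + 8) + 6 + 4 = 4618 send WRs on the QP.
 */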

/* Max registration work requests per command */
#define ISER_MAX_REG_WR_PER_CMD		5

/* For Signature we don't support DATAOUTs so no need to make room for them */
#define ISER_QP_SIG_MAX_REQ_DTOS	(ISER_DEF_XMIT_CMDS_MAX *	\
					(1 + ISER_MAX_REG_WR_PER_CMD) +	\
					ISER_MAX_TX_MISC_PDUS +		\
					ISER_MAX_RX_MISC_PDUS)
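/*
 * Worked example for the signature (T10-PI) case, again assuming the
 * default of 512 commands: 512 * (1 + 5) + 6 + 4 = 3082 send WRs.
 */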

#define ISER_GET_MAX_XMIT_CMDS(send_wr) ((send_wr			\
					 - ISER_MAX_TX_MISC_PDUS	\
					 - ISER_MAX_RX_MISC_PDUS) /	\
					 (1 + ISER_INFLIGHT_DATAOUTS))
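/*
 * ISER_GET_MAX_XMIT_CMDS() is the inverse of the ISER_QP_MAX_REQ_DTOS
 * formula above: given the number of send WRs a device can actually
 * support, it yields how many outstanding SCSI commands fit. For example
 * (illustrative only), a device limited to 1024 send WRs gives
 * (1024 - 6 - 4) / (1 + 8) = 112 commands.
 */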

#define ISER_SIGNAL_CMD_COUNT 32

/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN	(sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr))

#define ISER_RECV_DATA_SEG_LEN	128
#define ISER_RX_PAYLOAD_SIZE	(ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
#define ISER_RX_LOGIN_SIZE	(ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
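/*
 * For reference (assuming the 28-byte iSER header from <scsi/iser.h>, the
 * 48-byte iSCSI BHS, and ISCSI_DEF_MAX_RECV_SEG_LEN of 8192):
 * ISER_HEADERS_LEN is 76 bytes, so a normal receive buffer carries
 * 76 + 128 = 204 bytes and a login receive buffer carries 76 + 8192 = 8268.
 */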

/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE		64

enum iser_conn_state {
	ISER_CONN_INIT,		   /* descriptor allocated, no conn       */
	ISER_CONN_PENDING,	   /* in the process of being established */
	ISER_CONN_UP,		   /* up and running                      */
	ISER_CONN_TERMINATING,	   /* in the process of being terminated  */
	ISER_CONN_DOWN,		   /* shut down                           */
	ISER_CONN_STATES_NUM
};

enum iser_task_status {
	ISER_TASK_STATUS_INIT = 0,
	ISER_TASK_STATUS_STARTED,
	ISER_TASK_STATUS_COMPLETED
};

enum iser_data_dir {
	ISER_DIR_IN = 0,	   /* to initiator   */
	ISER_DIR_OUT,		   /* from initiator */
	ISER_DIRS_NUM
};

/**
 * struct iser_data_buf - iSER data buffer
 *
 * @sg:        pointer to the sg list
 * @size:      num entries of this sg
 * @data_len:  total buffer byte len
 * @dma_nents: returned by dma_map_sg
 */
struct iser_data_buf {
	struct scatterlist *sg;
	int                 size;
	unsigned long       data_len;
	int                 dma_nents;
};

/* fwd declarations */
struct iser_device;
struct iscsi_iser_task;
struct iscsi_endpoint;
struct iser_reg_resources;

/**
 * struct iser_mem_reg - iSER memory registration info
 *
 * @sge:   memory region sg element
 * @rkey:  memory region remote key
 * @mem_h: pointer to registration context (FMR/Fastreg)
 */
struct iser_mem_reg {
	struct ib_sge	 sge;
	u32		 rkey;
	void		*mem_h;
};

enum iser_desc_type {
	ISCSI_TX_CONTROL,
	ISCSI_TX_SCSI_COMMAND,
	ISCSI_TX_DATAOUT
};

/**
 * struct iser_tx_desc - iSER TX descriptor
 *
 * @iser_header:  iser header
 * @iscsi_header: iscsi header
 * @type:         command/control/dataout
 * @dma_addr:     header buffer dma_address
 * @tx_sg:        sg[0] points to iser/iscsi headers;
 *                sg[1] optionally points to either immediate data,
 *                unsolicited data-out or control
 * @num_sge:      number of sges used on this TX task
 * @cqe:          completion handler
 * @mapped:       is the task header mapped
 * @reg_wr:       registration WR
 * @send_wr:      send WR
 * @inv_wr:       invalidate WR
 */
struct iser_tx_desc {
	struct iser_ctrl	     iser_header;
	struct iscsi_hdr	     iscsi_header;
	enum iser_desc_type	     type;
	u64			     dma_addr;
	struct ib_sge		     tx_sg[2];
	int			     num_sge;
	struct ib_cqe		     cqe;
	bool			     mapped;
	struct ib_reg_wr	     reg_wr;
	struct ib_send_wr	     send_wr;
	struct ib_send_wr	     inv_wr;
};

#define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
				 sizeof(u64) + sizeof(struct ib_sge) + \
				 sizeof(struct ib_cqe)))
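/*
 * The pad simply rounds each struct iser_rx_desc up to a fixed 256-byte
 * footprint (iser/iscsi headers + data segment + dma_addr + rx_sg + cqe,
 * with the remainder taken up by pad[]), so the rx descriptor array stays
 * uniformly sized.
 */
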
/**
 * struct iser_rx_desc - iSER RX descriptor
 *
 * @iser_header:  iser header
 * @iscsi_header: iscsi header
 * @data:         received data segment
 * @dma_addr:     receive buffer dma address
 * @rx_sg:        ib_sge of receive buffer
 * @cqe:          completion handler
 * @pad:          for sense data TODO: Modify to maximum sense length supported
 */
struct iser_rx_desc {
	struct iser_ctrl	     iser_header;
	struct iscsi_hdr	     iscsi_header;
	char			     data[ISER_RECV_DATA_SEG_LEN];
	u64			     dma_addr;
	struct ib_sge		     rx_sg;
	struct ib_cqe		     cqe;
	char			     pad[ISER_RX_PAD_SIZE];
} __packed;

/**
 * struct iser_login_desc - iSER login descriptor
 *
 * @req:     pointer to login request buffer
 * @rsp:     pointer to login response buffer
 * @req_dma: DMA address of login request buffer
 * @rsp_dma: DMA address of login response buffer
 * @sge:     IB sge for login post recv
 * @cqe:     completion handler
 */
struct iser_login_desc {
	void			     *req;
	void			     *rsp;
	u64			     req_dma;
	u64			     rsp_dma;
	struct ib_sge		     sge;
	struct ib_cqe		     cqe;
} __packed;

struct iser_conn;
struct ib_conn;
struct iscsi_iser_task;

/**
 * struct iser_device - iSER device handle
 *
 * @ib_device:     RDMA device
 * @pd:            Protection Domain for this device
 * @event_handler: IB events handler routine
 * @ig_list:       entry in devices list
 * @refcount:      Reference counter, incremented per open iser connection
 */
struct iser_device {
	struct ib_device	*ib_device;
	struct ib_pd		*pd;
	struct ib_event_handler	 event_handler;
	struct list_head	 ig_list;
	int			 refcount;
};

/**
 * struct iser_reg_resources - Fast registration resources
 *
 * @mr:       memory region
 * @sig_mr:   signature memory region
 * @mr_valid: is mr valid indicator
 */
struct iser_reg_resources {
	struct ib_mr	*mr;
	struct ib_mr	*sig_mr;
	u8		 mr_valid:1;
};

/**
 * struct iser_fr_desc - Fast registration descriptor
 *
 * @list:          entry in connection fastreg pool
 * @rsc:           data buffer registration resources
 * @sig_protected: is region protected indicator
 * @all_list:      entry in the pool's list of all descriptors
 */
struct iser_fr_desc {
	struct list_head		  list;
	struct iser_reg_resources	  rsc;
	bool				  sig_protected;
	struct list_head		  all_list;
};

/**
 * struct iser_fr_pool - connection fast registration pool
 *
 * @list:     list of fastreg descriptors
 * @lock:     protects fastreg pool
 * @size:     size of the pool
 * @all_list: list of all descriptors allocated for this pool
 */
struct iser_fr_pool {
	struct list_head	list;
	spinlock_t		lock;
	int			size;
	struct list_head	all_list;
};

/**
 * struct ib_conn - Infiniband related objects
 *
 * @cma_id:              rdma_cm connection manager handle
 * @qp:                  Connection Queue-pair
 * @cq:                  Connection completion queue
 * @cq_size:             The number of max outstanding completions
 * @post_recv_buf_count: post receive counter
 * @sig_count:           send work request signal count
 * @rx_wr:               receive work request for batch posts
 * @device:              reference to iser device
 * @fr_pool:             connection fast registration pool
 * @pi_support:          Indicate device T10-PI support
 * @reg_cqe:             completion handler
 */
struct ib_conn {
	struct rdma_cm_id	*cma_id;
	struct ib_qp		*qp;
	struct ib_cq		*cq;
	u32			 cq_size;
	int			 post_recv_buf_count;
	u8			 sig_count;
	struct ib_recv_wr	 rx_wr[ISER_MIN_POSTED_RX];
	struct iser_device	*device;
	struct iser_fr_pool	 fr_pool;
	bool			 pi_support;
	struct ib_cqe		 reg_cqe;
};

/**
 * struct iser_conn - iSER connection context
 *
 * @ib_conn:          connection RDMA resources
 * @iscsi_conn:       link to matching iscsi connection
 * @ep:               transport handle
 * @state:            connection logical state
 * @qp_max_recv_dtos: maximum number of data outs, corresponds
 *                    to max number of post recvs
 * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
 * @min_posted_rx:    (qp_max_recv_dtos >> 2)
 * @max_cmds:         maximum cmds allowed for this connection
 * @name:             connection peer portal
 * @release_work:     deferred work for release job
 * @state_mutex:      protects iser connection state
 * @stop_completion:  conn_stop completion
 * @ib_completion:    RDMA cleanup completion
 * @up_completion:    connection establishment completed
 *                    (state is ISER_CONN_UP)
 * @conn_list:        entry in ig conn list
 * @login_desc:       login descriptor
 * @rx_desc_head:     head of rx_descs cyclic buffer
 * @rx_descs:         rx buffers array (cyclic buffer)
 * @num_rx_descs:     number of rx descriptors
 * @scsi_sg_tablesize: scsi host sg_tablesize
 * @pages_per_mr:     maximum pages available for registration
 * @snd_w_inv:        connection uses remote invalidation
 */
struct iser_conn {
	struct ib_conn		     ib_conn;
	struct iscsi_conn	     *iscsi_conn;
	struct iscsi_endpoint	     *ep;
	enum iser_conn_state	     state;
	unsigned		     qp_max_recv_dtos;
	unsigned		     qp_max_recv_dtos_mask;
	unsigned		     min_posted_rx;
	u16			     max_cmds;
	char			     name[ISER_OBJECT_NAME_SIZE];
	struct work_struct	     release_work;
	struct mutex		     state_mutex;
	struct completion	     stop_completion;
	struct completion	     ib_completion;
	struct completion	     up_completion;
	struct list_head	     conn_list;
	struct iser_login_desc       login_desc;
	unsigned int		     rx_desc_head;
	struct iser_rx_desc	     *rx_descs;
	u32			     num_rx_descs;
	unsigned short		     scsi_sg_tablesize;
	unsigned short		     pages_per_mr;
	bool			     snd_w_inv;
};

/**
 * struct iscsi_iser_task - iser task context
 *
 * @desc:         TX descriptor
 * @iser_conn:    link to iser connection
 * @status:       current task status
 * @sc:           link to scsi command
 * @command_sent: indicate if command was sent
 * @dir:          iser data direction
 * @rdma_reg:     task rdma registration desc
 * @data:         iser data buffer desc
 * @prot:         iser protection buffer desc
 */
struct iscsi_iser_task {
	struct iser_tx_desc	     desc;
	struct iser_conn	     *iser_conn;
	enum iser_task_status	     status;
	struct scsi_cmnd	     *sc;
	int			     command_sent;
	int			     dir[ISER_DIRS_NUM];
	struct iser_mem_reg	     rdma_reg[ISER_DIRS_NUM];
	struct iser_data_buf	     data[ISER_DIRS_NUM];
	struct iser_data_buf	     prot[ISER_DIRS_NUM];
};

/**
 * struct iser_global - iSER global context
 *
 * @device_list_mutex: protects device_list
 * @device_list:       iser devices global list
 * @connlist_mutex:    protects connlist
 * @connlist:          iser connections global list
 * @desc_cache:        kmem cache for tx dataout
 */
struct iser_global {
	struct mutex		device_list_mutex;
	struct list_head	device_list;
	struct mutex		connlist_mutex;
	struct list_head	connlist;
	struct kmem_cache	*desc_cache;
};

extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;

int iser_send_control(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_command(struct iscsi_conn *conn,
		      struct iscsi_task *task);

int iser_send_data_out(struct iscsi_conn *conn,
		       struct iscsi_task *task,
		       struct iscsi_data *hdr);

void iscsi_iser_recv(struct iscsi_conn *conn,
		     struct iscsi_hdr *hdr,
		     char *rx_data,
		     int rx_data_len);

void iser_conn_init(struct iser_conn *iser_conn);

void iser_conn_release(struct iser_conn *iser_conn);

int iser_conn_terminate(struct iser_conn *iser_conn);

void iser_release_work(struct work_struct *work);

void iser_err_comp(struct ib_wc *wc, const char *type);
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);

void iser_task_rdma_init(struct iscsi_iser_task *task);

void iser_task_rdma_finalize(struct iscsi_iser_task *task);

void iser_free_rx_descriptors(struct iser_conn *iser_conn);

void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *mem,
				     enum iser_data_dir cmd_dir);

int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
			 enum iser_data_dir dir,
			 bool all_imm);
void iser_unreg_mem_fastreg(struct iscsi_iser_task *task,
			    enum iser_data_dir dir);

int iser_connect(struct iser_conn *iser_conn,
		 struct sockaddr *src_addr,
		 struct sockaddr *dst_addr,
		 int non_blocking);

int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal);

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir);

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir);

int iser_initialize_task_headers(struct iscsi_task *task,
				 struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
			      struct iscsi_session *session);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector);

static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
{
	return container_of(ib_conn, struct iser_conn, ib_conn);
}

static inline struct iser_rx_desc *
iser_rx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_rx_desc, cqe);
}

static inline struct iser_tx_desc *
iser_tx(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_tx_desc, cqe);
}

static inline struct iser_login_desc *
iser_login(struct ib_cqe *cqe)
{
	return container_of(cqe, struct iser_login_desc, cqe);
}
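
/*
 * Illustrative usage sketch (not part of the driver; example_rx_done() and
 * process() are hypothetical names): a completion handler recovers its
 * descriptor from the ib_cqe embedded in the work completion, e.g.:
 *
 *	static void example_rx_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
 *
 *		if (unlikely(wc->status != IB_WC_SUCCESS))
 *			iser_err_comp(wc, "recv");
 *		else
 *			process(desc);	// e.g. parse desc->iscsi_header
 *	}
 */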

#endif