/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_H
#define _SIW_H

#include <rdma/ib_verbs.h>
#include <rdma/restrack.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>

#include <rdma/siw-abi.h>
#include "iwarp.h"

#define SIW_VENDOR_ID 0x626d74 /* ascii 'bmt' for now */
#define SIW_VENDORT_PART_ID 0
#define SIW_MAX_QP (1024 * 100)
#define SIW_MAX_QP_WR (1024 * 32)
#define SIW_MAX_ORD_QP 128
#define SIW_MAX_IRD_QP 128
#define SIW_MAX_SGE_PBL 256 /* max num sge's for PBL */
#define SIW_MAX_SGE_RD 1 /* iwarp limitation. we could relax */
#define SIW_MAX_CQ (1024 * 100)
#define SIW_MAX_CQE (SIW_MAX_QP_WR * 100)
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD

/* Min number of bytes for using zero copy transmit */
#define SENDPAGE_THRESH PAGE_SIZE

/* Maximum number of frames which can be sent in one SQ processing */
#define SQ_USER_MAXBURST 100

/* Maximum number of consecutive IRQ elements which get served
 * if SQ has pending work. Prevents starving local SQ processing
 * by serving peer Read Requests.
 */
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4

struct siw_dev_cap {
	int max_qp;
	int max_qp_wr;
	int max_ord; /* max. outbound read queue depth */
	int max_ird; /* max. inbound read queue depth */
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_mw;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
};

struct siw_pd {
	struct ib_pd base_pd;
};

struct siw_device {
	struct ib_device base_dev;
	struct net_device *netdev;
	struct siw_dev_cap attrs;

	u32 vendor_part_id;
	int numa_node;

	/* physical port state (only one port per device) */
	enum ib_port_state state;

	spinlock_t lock;

	struct xarray qp_xa;
	struct xarray mem_xa;

	struct list_head cep_list;
	struct list_head qp_list;

	/* active objects statistics to enforce limits */
	atomic_t num_qp;
	atomic_t num_cq;
	atomic_t num_pd;
	atomic_t num_mr;
	atomic_t num_srq;
	atomic_t num_ctx;

	struct work_struct netdev_down;
};

struct siw_ucontext {
	struct ib_ucontext base_ucontext;
	struct siw_device *sdev;
};

/*
 * The RDMA core does not define LOCAL_READ access, which is always
 * enabled implicitly.
 */
#define IWARP_ACCESS_MASK					\
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |	\
	 IB_ACCESS_REMOTE_READ)

/*
 * siw presentation of user memory registered as source
 * or target of RDMA operations.
 */

struct siw_page_chunk {
	struct page **plist;
};

struct siw_umem {
	struct siw_page_chunk *page_chunk;
	int num_pages;
	bool writable;
	u64 fp_addr; /* First page base address */
	struct mm_struct *owning_mm;
};

struct siw_pble {
	dma_addr_t addr; /* Address of assigned buffer */
	unsigned int size; /* Size of this entry */
	unsigned long pbl_off; /* Total offset from start of PBL */
};

struct siw_pbl {
	unsigned int num_buf;
	unsigned int max_buf;
	struct siw_pble pbe[];
};

/*
 * Generic memory representation for registered siw memory.
 * Memory lookup is always done via the upper 24 bits of the STag
 * (the STag index).
 */
struct siw_mem {
	struct siw_device *sdev;
	struct kref ref;
	u64 va; /* VA of memory */
	u64 len; /* length of the memory buffer in bytes */
	u32 stag; /* iWarp memory access steering tag */
	u8 stag_valid; /* VALID or INVALID */
	u8 is_pbl; /* PBL or user space mem */
	u8 is_mw; /* Memory Region or Memory Window */
	enum ib_access_flags perms; /* local/remote READ & WRITE */
	union {
		struct siw_umem *umem;
		struct siw_pbl *pbl;
		void *mem_obj;
	};
	struct ib_pd *pd;
};

struct siw_mr {
	struct ib_mr base_mr;
	struct siw_mem *mem;
	struct rcu_head rcu;
};

/*
 * Error codes for local or remote
 * access to registered memory
 */
enum siw_access_state {
	E_ACCESS_OK,
	E_STAG_INVALID,
	E_BASE_BOUNDS,
	E_ACCESS_PERM,
	E_PD_MISMATCH
};

enum siw_wr_state {
	SIW_WR_IDLE,
	SIW_WR_QUEUED, /* processing has not started yet */
	SIW_WR_INPROGRESS /* initiated processing of the WR */
};

/* The WQE currently being processed (RX or TX) */
struct siw_wqe {
	/* Copy of the application's SQE or RQE */
	union {
		struct siw_sqe sqe;
		struct siw_rqe rqe;
	};
	struct siw_mem *mem[SIW_MAX_SGE]; /* per sge's resolved mem */
	enum siw_wr_state wr_status;
	enum siw_wc_status wc_status;
	u32 bytes; /* total bytes to process */
	u32 processed; /* bytes processed */
};

struct siw_cq {
	struct ib_cq base_cq;
	spinlock_t lock;
	struct siw_cq_ctrl *notify;
	struct siw_cqe *queue;
	u32 cq_put;
	u32 cq_get;
	u32 num_cqe;
	struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
	u32 id; /* For debugging only */
};

enum siw_qp_state {
	SIW_QP_STATE_IDLE,
	SIW_QP_STATE_RTR,
	SIW_QP_STATE_RTS,
	SIW_QP_STATE_CLOSING,
	SIW_QP_STATE_TERMINATE,
	SIW_QP_STATE_ERROR,
	SIW_QP_STATE_COUNT
};

enum siw_qp_flags {
	SIW_RDMA_BIND_ENABLED = (1 << 0),
	SIW_RDMA_WRITE_ENABLED = (1 << 1),
	SIW_RDMA_READ_ENABLED = (1 << 2),
	SIW_SIGNAL_ALL_WR = (1 << 3),
	SIW_MPA_CRC = (1 << 4),
	SIW_QP_IN_DESTROY = (1 << 5)
};

enum siw_qp_attr_mask {
	SIW_QP_ATTR_STATE = (1 << 0),
	SIW_QP_ATTR_ACCESS_FLAGS = (1 << 1),
	SIW_QP_ATTR_LLP_HANDLE = (1 << 2),
	SIW_QP_ATTR_ORD = (1 << 3),
	SIW_QP_ATTR_IRD = (1 << 4),
	SIW_QP_ATTR_SQ_SIZE = (1 << 5),
	SIW_QP_ATTR_RQ_SIZE = (1 << 6),
	SIW_QP_ATTR_MPA = (1 << 7)
};

struct siw_srq {
	struct ib_srq base_srq;
	spinlock_t lock;
	u32 max_sge;
	u32 limit; /* low watermark for async event */
	struct siw_rqe *recvq;
	u32 rq_put;
	u32 rq_get;
	u32 num_rqe; /* max # of wqe's allowed */
	struct rdma_user_mmap_entry *srq_entry; /* mmap info for SRQ array */
	bool armed:1; /* inform user if limit hit */
	bool is_kernel_res:1; /* true if kernel client */
};

struct siw_qp_attrs {
	enum siw_qp_state state;
	u32 sq_size;
	u32 rq_size;
	u32 orq_size;
	u32 irq_size;
	u32 sq_max_sges;
	u32 rq_max_sges;
	enum siw_qp_flags flags;

	struct socket *sk;
};

enum siw_tx_ctx {
	SIW_SEND_HDR, /* start or continue sending HDR */
	SIW_SEND_DATA, /* start or continue sending DDP payload */
	SIW_SEND_TRAILER, /* start or continue sending TRAILER */
	SIW_SEND_SHORT_FPDU /* send whole FPDU hdr|data|trailer at once */
};

enum siw_rx_state {
	SIW_GET_HDR, /* await new hdr or within hdr */
	SIW_GET_DATA_START, /* start of inbound DDP payload */
	SIW_GET_DATA_MORE, /* continuation of (misaligned) DDP payload */
	SIW_GET_TRAILER /* await new trailer or within trailer */
};

struct siw_rx_stream {
	struct sk_buff *skb;
	int skb_new; /* pending unread bytes in skb */
	int skb_offset; /* offset in skb */
	int skb_copied; /* processed bytes in skb */

	union iwarp_hdr hdr;
	struct mpa_trailer trailer;

	enum siw_rx_state state;

	/*
	 * For each FPDU, the main RX loop runs through 3 stages:
	 * receiving protocol headers, placing DDP payload and receiving
	 * trailer information (CRC + possibly padding).
	 * The next two variables keep state on the receive status of the
	 * current FPDU part (hdr, data, trailer).
	 */
	int fpdu_part_rcvd; /* bytes in pkt part copied */
	int fpdu_part_rem; /* bytes in pkt part not seen */

	/*
	 * Next expected DDP MSN for each QN +
	 * expected steering tag +
	 * expected DDP tagged offset (all HBO)
	 */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];
	u32 ddp_stag;
	u64 ddp_to;
	u32 inval_stag; /* Stag to be invalidated */

	struct shash_desc *mpa_crc_hd;
	u8 rx_suspend : 1;
	u8 pad : 2; /* # of pad bytes expected */
	u8 rdmap_op : 4; /* opcode of current frame */
};

struct siw_rx_fpdu {
	/*
	 * Local destination memory of inbound RDMA operation.
	 * Valid, according to wqe->wr_status
	 */
	struct siw_wqe wqe_active;

	unsigned int pbl_idx; /* Index into current PBL */
	unsigned int sge_idx; /* current sge in rx */
	unsigned int sge_off; /* already rcvd in curr. sge */

	char first_ddp_seg; /* this is the first DDP seg */
	char more_ddp_segs; /* more DDP segs expected */
	u8 prev_rdmap_op : 4; /* opcode of prev frame */
};

/*
 * Shorthands for short packets w/o payload
 * to be transmitted more efficiently.
 */
struct siw_send_pkt {
	struct iwarp_send send;
	__be32 crc;
};

struct siw_write_pkt {
	struct iwarp_rdma_write write;
	__be32 crc;
};

struct siw_rreq_pkt {
	struct iwarp_rdma_rreq rreq;
	__be32 crc;
};

struct siw_rresp_pkt {
	struct iwarp_rdma_rresp rresp;
	__be32 crc;
};

struct siw_iwarp_tx {
	union {
		union iwarp_hdr hdr;

		/* Generic part of FPDU header */
		struct iwarp_ctrl ctrl;
		struct iwarp_ctrl_untagged c_untagged;
		struct iwarp_ctrl_tagged c_tagged;

		/* FPDU headers */
		struct iwarp_rdma_write rwrite;
		struct iwarp_rdma_rreq rreq;
		struct iwarp_rdma_rresp rresp;
		struct iwarp_terminate terminate;
		struct iwarp_send send;
		struct iwarp_send_inv send_inv;

		/* complete short FPDUs */
		struct siw_send_pkt send_pkt;
		struct siw_write_pkt write_pkt;
		struct siw_rreq_pkt rreq_pkt;
		struct siw_rresp_pkt rresp_pkt;
	} pkt;

	struct mpa_trailer trailer;
	/* DDP MSN for untagged messages */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];

	enum siw_tx_ctx state;
	u16 ctrl_len; /* ddp+rdmap hdr */
	u16 ctrl_sent;
	int burst;
	int bytes_unsent; /* ddp payload bytes */

	struct shash_desc *mpa_crc_hd;

	u8 do_crc : 1; /* do crc for segment */
	u8 use_sendpage : 1; /* send w/o copy */
	u8 tx_suspend : 1; /* stop sending DDP segs. */
	u8 pad : 2; /* # pad in current fpdu */
	u8 orq_fence : 1; /* ORQ full or Send fenced */
	u8 in_syscall : 1; /* TX out of user context */
	u8 zcopy_tx : 1; /* Use TCP_SENDPAGE if possible */
	u8 gso_seg_limit; /* Maximum segments for GSO, 0 = unbound */

	u16 fpdu_len; /* len of FPDU to tx */
	unsigned int tcp_seglen; /* remaining tcp seg space */

	struct siw_wqe wqe_active;

	int pbl_idx; /* Index into current PBL */
	int sge_idx; /* current sge in tx */
	u32 sge_off; /* already sent in curr. sge */
};

struct siw_qp {
	struct ib_qp base_qp;
	struct siw_device *sdev;
	struct kref ref;
	struct list_head devq;
	int tx_cpu;
	struct siw_qp_attrs attrs;

	struct siw_cep *cep;
	struct rw_semaphore state_lock;

	struct ib_pd *pd;
	struct siw_cq *scq;
	struct siw_cq *rcq;
	struct siw_srq *srq;

	struct siw_iwarp_tx tx_ctx; /* Transmit context */
	spinlock_t sq_lock;
	struct siw_sqe *sendq; /* send queue element array */
	uint32_t sq_get; /* consumer index into sq array */
	uint32_t sq_put; /* kernel prod. index into sq array */
	struct llist_node tx_list;

	struct siw_sqe *orq; /* outbound read queue element array */
	spinlock_t orq_lock;
	uint32_t orq_get; /* consumer index into orq array */
	uint32_t orq_put; /* shared producer index for ORQ */

	struct siw_rx_stream rx_stream;
	struct siw_rx_fpdu *rx_fpdu;
	struct siw_rx_fpdu rx_tagged;
	struct siw_rx_fpdu rx_untagged;
	spinlock_t rq_lock;
	struct siw_rqe *recvq; /* recv queue element array */
	uint32_t rq_get; /* consumer index into rq array */
	uint32_t rq_put; /* kernel prod. index into rq array */

	struct siw_sqe *irq; /* inbound read queue element array */
	uint32_t irq_get; /* consumer index into irq array */
	uint32_t irq_put; /* producer index into irq array */
	int irq_burst;

	struct { /* information to be carried in TERMINATE pkt, if valid */
		u8 valid;
		u8 in_tx;
		u8 layer : 4, etype : 4;
		u8 ecode;
	} term_info;
	struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */
	struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
	struct rcu_head rcu;
};

/* helper macros */
#define rx_qp(rx) container_of(rx, struct siw_qp, rx_stream)
#define tx_qp(tx) container_of(tx, struct siw_qp, tx_ctx)
#define tx_wqe(qp) (&(qp)->tx_ctx.wqe_active)
#define rx_wqe(rctx) (&(rctx)->wqe_active)
#define rx_mem(rctx) ((rctx)->wqe_active.mem[0])
#define tx_type(wqe) ((wqe)->sqe.opcode)
#define rx_type(wqe) ((wqe)->rqe.opcode)
#define tx_flags(wqe) ((wqe)->sqe.flags)

struct iwarp_msg_info {
	int hdr_len;
	struct iwarp_ctrl ctrl;
	int (*rx_data)(struct siw_qp *qp);
};

struct siw_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	void *address;
};

/* Global siw parameters. Currently set in siw_main.c */
extern const bool zcopy_tx;
extern const bool try_gso;
extern const bool loopback_enabled;
extern const bool mpa_crc_required;
extern const bool mpa_crc_strict;
extern const bool siw_tcp_nagle;
extern u_char mpa_version;
extern const bool peer_to_peer;
extern struct task_struct *siw_tx_thread[];

extern struct crypto_shash *siw_crypto_shash;
extern struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1];

/* QP general functions */
int siw_qp_modify(struct siw_qp *qp, struct siw_qp_attrs *attr,
		  enum siw_qp_attr_mask mask);
int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl);
void siw_qp_llp_close(struct siw_qp *qp);
void siw_qp_cm_drop(struct siw_qp *qp, int schedule);
void siw_send_terminate(struct siw_qp *qp);

void siw_qp_get_ref(struct ib_qp *qp);
void siw_qp_put_ref(struct ib_qp *qp);
int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp);
void siw_free_qp(struct kref *ref);

void siw_init_terminate(struct siw_qp *qp, enum term_elayer layer,
			u8 etype, u8 ecode, int in_tx);
enum ddp_ecode siw_tagged_error(enum siw_access_state state);
enum rdmap_ecode siw_rdmap_error(enum siw_access_state state);

void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
		     enum siw_wc_status status);
int siw_rqe_complete(struct siw_qp *qp, struct siw_rqe *rqe, u32 bytes,
		     u32 inval_stag, enum siw_wc_status status);
void siw_qp_llp_data_ready(struct sock *sk);
void siw_qp_llp_write_space(struct sock *sk);

/* QP TX path functions */
int siw_run_sq(void *arg);
int siw_qp_sq_process(struct siw_qp *qp);
int siw_sq_start(struct siw_qp *qp);
int siw_activate_tx(struct siw_qp *qp);
void siw_stop_tx_thread(int nr_cpu);
int siw_get_tx_cpu(struct siw_device *sdev);
void siw_put_tx_cpu(int cpu);

/* QP RX path functions */
int siw_proc_send(struct siw_qp *qp);
int siw_proc_rreq(struct siw_qp *qp);
int siw_proc_rresp(struct siw_qp *qp);
int siw_proc_write(struct siw_qp *qp);
int siw_proc_terminate(struct siw_qp *qp);

int siw_tcp_rx_data(read_descriptor_t *rd_desc, struct sk_buff *skb,
		    unsigned int off, size_t len);

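/*
 * The RX path keeps separate FPDU placement state for tagged messages
 * (RDMA Write, Read Response) and untagged messages (Send, Read Request,
 * Terminate). Select the context matching the opcode of the frame
 * currently being received.
 */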
static inline void set_rx_fpdu_context(struct siw_qp *qp, u8 opcode)
{
	if (opcode == RDMAP_RDMA_WRITE || opcode == RDMAP_RDMA_READ_RESP)
		qp->rx_fpdu = &qp->rx_tagged;
	else
		qp->rx_fpdu = &qp->rx_untagged;

	qp->rx_stream.rdmap_op = opcode;
}

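/*
 * Converters from RDMA core objects to the embedding siw objects.
 * Each siw structure embeds its ib_* counterpart, so container_of()
 * recovers the private object from the base pointer.
 */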
static inline struct siw_ucontext *to_siw_ctx(struct ib_ucontext *base_ctx)
{
	return container_of(base_ctx, struct siw_ucontext, base_ucontext);
}

static inline struct siw_qp *to_siw_qp(struct ib_qp *base_qp)
{
	return container_of(base_qp, struct siw_qp, base_qp);
}

static inline struct siw_cq *to_siw_cq(struct ib_cq *base_cq)
{
	return container_of(base_cq, struct siw_cq, base_cq);
}

static inline struct siw_srq *to_siw_srq(struct ib_srq *base_srq)
{
	return container_of(base_srq, struct siw_srq, base_srq);
}

static inline struct siw_device *to_siw_dev(struct ib_device *base_dev)
{
	return container_of(base_dev, struct siw_device, base_dev);
}

static inline struct siw_mr *to_siw_mr(struct ib_mr *base_mr)
{
	return container_of(base_mr, struct siw_mr, base_mr);
}

static inline struct siw_user_mmap_entry *
to_siw_mmap_entry(struct rdma_user_mmap_entry *rdma_mmap)
{
	return container_of(rdma_mmap, struct siw_user_mmap_entry, rdma_entry);
}

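/*
 * Resolve a QP number to the siw QP object. The xarray lookup runs under
 * RCU and a reference is taken only if the QP is not already going away
 * (kref_get_unless_zero()), so a successful lookup must be paired with
 * siw_qp_put() by the caller.
 */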
static inline struct siw_qp *siw_qp_id2obj(struct siw_device *sdev, int id)
{
	struct siw_qp *qp;

	rcu_read_lock();
	qp = xa_load(&sdev->qp_xa, id);
	if (likely(qp && kref_get_unless_zero(&qp->ref))) {
		rcu_read_unlock();
		return qp;
	}
	rcu_read_unlock();
	return NULL;
}

static inline u32 qp_id(struct siw_qp *qp)
{
	return qp->base_qp.qp_num;
}

static inline void siw_qp_get(struct siw_qp *qp)
{
	kref_get(&qp->ref);
}

static inline void siw_qp_put(struct siw_qp *qp)
{
	kref_put(&qp->ref, siw_free_qp);
}

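/*
 * SQ, ORQ and IRQ are fixed-size rings of siw_sqe elements. A slot holds
 * a posted work request while SIW_WQE_VALID is set in its flags word and
 * is free when flags reads zero; the ring position is derived from the
 * free-running get/put index modulo the configured queue size.
 */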
static inline int siw_sq_empty(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	return READ_ONCE(sqe->flags) == 0;
}

static inline struct siw_sqe *sq_get_next(struct siw_qp *qp)
{
	struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

	if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
		return sqe;

	return NULL;
}

static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
{
	return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}

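/* Return the next free ORQ slot, or NULL if the ORQ is full. */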
static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
	struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];

	if (READ_ONCE(orq_e->flags) == 0)
		return orq_e;

	return NULL;
}

static inline int siw_orq_empty(struct siw_qp *qp)
{
	return qp->orq[qp->orq_get % qp->attrs.orq_size].flags == 0 ? 1 : 0;
}

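/*
 * Reserve the next free IRQ element for an inbound Read Request.
 * The producer index is advanced on success; NULL means the IRQ is full.
 */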
static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
{
	struct siw_sqe *irq_e = &qp->irq[qp->irq_put % qp->attrs.irq_size];

	if (READ_ONCE(irq_e->flags) == 0) {
		qp->irq_put++;
		return irq_e;
	}
	return NULL;
}

static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
{
	return (__force __wsum)crc32c((__force __u32)sum, buff, len);
}

static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
				      int len)
{
	return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
						   (__force __u32)csum2, len);
}

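/*
 * Feed 'len' bytes of the current skb, starting at the current offset,
 * into the running MPA CRC32C. __skb_checksum() walks the (possibly
 * fragmented) skb and calls back into the crc32c helpers above; the
 * running value is kept in the shash descriptor context.
 */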
static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
{
	const struct skb_checksum_ops siw_cs_ops = {
		.update = siw_csum_update,
		.combine = siw_csum_combine,
	};
	__wsum crc = *(u32 *)shash_desc_ctx(srx->mpa_crc_hd);

	crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
			     &siw_cs_ops);
	*(u32 *)shash_desc_ctx(srx->mpa_crc_hd) = crc;
}

#define siw_dbg(ibdev, fmt, ...)					\
	ibdev_dbg(ibdev, "%s: " fmt, __func__, ##__VA_ARGS__)

#define siw_dbg_qp(qp, fmt, ...)					\
	ibdev_dbg(&qp->sdev->base_dev, "QP[%u] %s: " fmt, qp_id(qp), __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_cq(cq, fmt, ...)					\
	ibdev_dbg(cq->base_cq.device, "CQ[%u] %s: " fmt, cq->id, __func__, \
		  ##__VA_ARGS__)

#define siw_dbg_pd(pd, fmt, ...)					\
	ibdev_dbg(pd->device, "PD[%u] %s: " fmt, pd->res.id, __func__,	\
		  ##__VA_ARGS__)

#define siw_dbg_mem(mem, fmt, ...)					\
	ibdev_dbg(&mem->sdev->base_dev,					\
		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)

#define siw_dbg_cep(cep, fmt, ...)					\
	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,		\
		  cep, __func__, ##__VA_ARGS__)

void siw_cq_flush(struct siw_cq *cq);
void siw_sq_flush(struct siw_qp *qp);
void siw_rq_flush(struct siw_qp *qp);
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc);

#endif