/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 */

#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
#include <rdma/rvt-abi.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID        0
#define RVT_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK   0x04
#define RVT_R_RSP_SEND  0x08
#define RVT_R_COMM_EST  0x10

/*
 * If a packet's QP[23:16] bits match this value, then it is
 * a PSM packet and the hardware will expect a KDETH header
 * following the BTH.
 */
#define RVT_KDETH_QP_PREFIX       0x80
#define RVT_KDETH_QP_SUFFIX       0xffff
#define RVT_KDETH_QP_PREFIX_MASK  0x00ff0000
#define RVT_KDETH_QP_PREFIX_SHIFT 16
#define RVT_KDETH_QP_BASE         (u32)(RVT_KDETH_QP_PREFIX << \
					RVT_KDETH_QP_PREFIX_SHIFT)
#define RVT_KDETH_QP_MAX          (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)

/*
 * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
 * prefix value, then it is an AIP packet with a DETH containing the entropy
 * value in byte 4 following the BTH.
 */
#define RVT_AIP_QP_PREFIX       0x81
#define RVT_AIP_QP_SUFFIX       0xffff
#define RVT_AIP_QP_PREFIX_MASK  0x00ff0000
#define RVT_AIP_QP_PREFIX_SHIFT 16
#define RVT_AIP_QP_BASE         (u32)(RVT_AIP_QP_PREFIX << \
				      RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QPN_MAX         BIT(RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QP_MAX          (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
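
/*
 * Illustrative sketch (editor's addition, not part of the upstream API):
 * classifying a 24-bit destination QPN against the two prefix ranges
 * above.  The example_qpn_is_* helper names are hypothetical.
 */
static inline bool example_qpn_is_kdeth(u32 qpn)
{
	/* true when QPN[23:16] == RVT_KDETH_QP_PREFIX (0x80) */
	return (qpn & RVT_KDETH_QP_PREFIX_MASK) == RVT_KDETH_QP_BASE;
}

static inline bool example_qpn_is_aip(u32 qpn)
{
	/* true when QPN[23:16] == RVT_AIP_QP_PREFIX (0x81) */
	return (qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE;
}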

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                         before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
 *                         before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - The max bit that can be used by rdmavt
 */
#define RVT_S_SIGNAL_REQ_WR	0x0001
#define RVT_S_BUSY		0x0002
#define RVT_S_TIMER		0x0004
#define RVT_S_RESP_PENDING	0x0008
#define RVT_S_ACK_PENDING	0x0010
#define RVT_S_WAIT_FENCE	0x0020
#define RVT_S_WAIT_RDMAR	0x0040
#define RVT_S_WAIT_RNR		0x0080
#define RVT_S_WAIT_SSN_CREDIT	0x0100
#define RVT_S_WAIT_DMA		0x0200
#define RVT_S_WAIT_PIO		0x0400
#define RVT_S_WAIT_TX		0x0800
#define RVT_S_WAIT_DMA_DESC	0x1000
#define RVT_S_WAIT_KMEM		0x2000
#define RVT_S_WAIT_PSN		0x4000
#define RVT_S_WAIT_ACK		0x8000
#define RVT_S_SEND_ONE		0x10000
#define RVT_S_UNLIMITED_CREDIT	0x20000
#define RVT_S_ECN		0x40000
#define RVT_S_MAX_BIT_MASK	0x800000

/*
 * Drivers should use s_flags starting with bit 31 down to the bit next to
 * RVT_S_MAX_BIT_MASK.
 */
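
/*
 * For example (editor's illustration; the flag name below is
 * hypothetical, not a real driver define), a driver could claim a
 * private flag from the top of the range reserved above:
 *
 *	#define MY_DRV_S_PRIVATE	0x80000000
 */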

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
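
/*
 * Illustrative sketch (editor's addition): a progress check of the
 * kind drivers typically make under qp->s_lock.  The helper name is
 * hypothetical; it takes the s_flags word by value because struct
 * rvt_qp is defined further below.
 */
static inline bool example_qp_send_blocked(u32 s_flags)
{
	/* blocked if busy or waiting on any I/O or send-ordering event */
	return !!(s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT));
}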

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK                0x01
#define RVT_POST_RECV_OK                0x02
#define RVT_PROCESS_RECV_OK             0x04
#define RVT_PROCESS_SEND_OK             0x08
#define RVT_PROCESS_NEXT_SEND_OK        0x10
#define RVT_FLUSH_SEND			0x20
#define RVT_FLUSH_RECV			0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED           IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)

/**
 * rvt_ud_wr - IB UD work plus AH cache
 * @wr: valid IB work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special case the UD WR so we can keep track of the AH attributes.
 *
 * NOTE: This data structure is strictly ordered wr then attr, i.e. the attr
 * MUST come after wr.  The ib_ud_wr is sized and copied in rvt_post_one_wr.
 * The copy assumes that wr is first.
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;                /* first packet sequence number */
	u32 lpsn;               /* last packet sequence number */
	u32 ssn;                /* send sequence number */
	u32 length;             /* total length of data in sg_list */
	void *priv;             /* driver dependent field */
	struct rvt_sge sg_list[];
};

/**
 * struct rvt_krwq - kernel struct receive work request
 * @p_lock: lock to protect the producer side of the kernel buffer
 * @head: index of next entry to fill
 * @c_lock: lock to protect the consumer side of the kernel buffer
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @rvt_rwqe: struct of receive work request queue entry
 *
 * This structure is used to contain the head pointer,
 * tail pointer and receive work queue entries for a
 * kernel-mode user.
 */
struct rvt_krwq {
	spinlock_t p_lock;	/* protect producer */
	u32 head;               /* new work requests posted to the head */

	/* protect consumer */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;               /* receives pull requests from here. */
	u32 count;		/* approx count of receive entries posted */
	struct rvt_rwqe *curr_wq;
	struct rvt_rwqe wq[];
};

/**
 * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
 * @swqe: valid Send WQE
 *
 */
static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
{
	return ibah_to_rvtah(swqe->ud_wr.wr.ah);
}

/**
 * rvt_get_swqe_ah_attr - Return the cached ah attribute information
 * @swqe: valid Send WQE
 *
 */
static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.attr;
}

/**
 * rvt_get_swqe_remote_qpn - Access the remote QPN value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qpn;
}

/**
 * rvt_get_swqe_remote_qkey - Access the remote qkey value
 * @swqe: valid Send WQE
 *
 */
static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.remote_qkey;
}

/**
 * rvt_get_swqe_pkey_index - Access the pkey index
 * @swqe: valid Send WQE
 *
 */
static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
{
	return swqe->ud_wr.wr.pkey_index;
}

struct rvt_rq {
	struct rvt_rwq *wq;
	struct rvt_krwq *kwq;
	u32 size;               /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/**
 * rvt_get_rq_count - count the number of receive work queue entries
 * in the circular buffer
 * @rq: data structure for the receive queue
 * @head: head index of the circular buffer
 * @tail: tail index of the circular buffer
 *
 * Return - total number of entries in the Receive Queue
 */

static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
{
	u32 count = head - tail;

	if ((s32)count < 0)
		count += rq->size;
	return count;
}
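
/*
 * Illustrative usage sketch (editor's addition): reading the occupancy
 * of a kernel-owned receive queue.  Assumes the caller already holds
 * rq->kwq->c_lock so that head/tail are stable; the helper name is
 * hypothetical.
 */
static inline u32 example_krwq_occupancy(struct rvt_rq *rq)
{
	/* head is written by the producer, tail by the consumer */
	return rvt_get_rq_count(rq, READ_ONCE(rq->kwq->head), rq->kwq->tail);
}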

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
	void *priv;
};

#define	RC_QP_SCALING_INTERVAL	5

#define RVT_OPERATION_PRIV        0x00000001
#define RVT_OPERATION_ATOMIC      0x00000002
#define RVT_OPERATION_ATOMIC_SGE  0x00000004
#define RVT_OPERATION_LOCAL       0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table-driven post send so that
 * each driver can support a potentially
 * different set of operations.
 *
 **/

struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};

/*
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;           /* link list for QPN hash table */
	struct rvt_swqe *s_wq;  /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;  /* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;               /* QKEY for this QP (for UD or RD) */
	u32 s_size;             /* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* shift for pmtu */
	u8 state;               /* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;         /* Alternate path timeout for this QP */
	u8 timeout;             /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;        /* PKEY index to use */
	u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;         /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
	u8 s_max_sge;           /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;      /* used for APM */
	u32 r_psn;              /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id;            /* ID for current receive WQE */
	u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
	u32 r_len;              /* total length of r_sge */
	u32 r_rcv_len;          /* receive data len processed */
	u32 r_msn;              /* message sequence number */

	u8 r_state;             /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;    /* index into s_ack_queue[] */
	u8 r_adefered;          /* deferred ack count */

	struct list_head rspwait;       /* link for waiting to respond */

	struct rvt_sge_state r_sge;     /* current receive data */
	struct rvt_rq r_rq;             /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;             /* new entries added here */
	u32 s_next_psn;         /* PSN for next request */
	u32 s_avail;            /* number of entries avail */
	u32 s_ssn;              /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;     /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;              /* total length of s_sge */
	u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
	u32 s_last_psn;         /* last response PSN processed */
	u32 s_sending_psn;      /* lowest PSN that is being sent */
	u32 s_sending_hpsn;     /* highest PSN that is being sent */
	u32 s_psn;              /* current packet sequence number */
	u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
	u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
	u32 s_tail;             /* next entry to process */
	u32 s_cur;              /* current work queue entry */
	u32 s_acked;            /* last un-ACK'ed entry */
	u32 s_last;             /* last completed entry */
	u32 s_lsn;              /* limit sequence number (credit) */
	u32 s_ahgpsn;           /* set to the psn in the copy of the header */
	u16 s_cur_size;         /* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;         /* size of s_hdr in 32 bit words */
	s8 s_ahgidx;
	u8 s_state;             /* opcode of last packet sent */
	u8 s_ack_state;         /* opcode of packet to ACK */
	u8 s_nak_state;         /* non-zero if NAK is pending */
	u8 r_nak_state;         /* non-zero if NAK is pending */
	u8 s_retry;             /* requester retry counter */
	u8 s_rnr_retry;         /* requester RNR retry counter */
	u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
	u8 s_acked_ack_queue;   /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}

#define RVT_QPN_MAX                 BIT(24)
#define RVT_QPNMAP_ENTRIES          (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE           (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK      (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK		    IB_QPN_MASK
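
/*
 * Illustrative sketch (editor's addition): how a QPN decomposes into a
 * page index and a bit offset for the qpn_table map declared below.
 * The helper name is hypothetical; only the macros above are from this
 * header.
 */
static inline void example_qpn_to_map_pos(u32 qpn, u32 *page, u32 *bit)
{
	*page = qpn / RVT_BITS_PER_PAGE;	/* which rvt_qpn_map page */
	*bit = qpn & RVT_BITS_PER_PAGE_MASK;	/* bit within that page */
}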

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags;         /* flags for QP0/1 allocated for each port */
	u32 last;               /* last QP number allocated */
	u32 nmaps;              /* size of the map table */
	u16 limit;
	u8  incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};

struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq.  This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				     (sizeof(struct rvt_swqe) +
				      qp->s_max_sge *
				      sizeof(struct rvt_sge)) * n);
}
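
/*
 * Illustrative sketch (editor's addition): because each ring entry
 * occupies sizeof(struct rvt_swqe) + s_max_sge * sizeof(struct rvt_sge)
 * bytes, the send queue must be walked with rvt_get_swqe_ptr() rather
 * than s_wq[n].  Hypothetical helper with defensive wrapping:
 */
static inline struct rvt_swqe *example_swqe_at(struct rvt_qp *qp, u32 idx)
{
	/* callers normally keep idx < qp->s_size already */
	return rvt_get_swqe_ptr(qp, idx % qp->s_size);
}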

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq.  This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->kwq->curr_wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}

/**
 * rvt_is_user_qp - return true if this is a user mode QP
 * @qp - the target QP
 */
static inline bool rvt_is_user_qp(struct rvt_qp *qp)
{
	return !!qp->pid;
}

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * a wqe relative reserved operation use.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @flags - send wqe flags
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
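
/*
 * Worked example (editor's addition): the << 8 discards the upper 8
 * bits and makes bit 23 the sign bit, so the comparison is modulo
 * 2^24.  E.g. rvt_cmp_msn(0x000002, 0xfffffe) yields
 * (2 - 0xfffffe) << 8 = 0x00000400 > 0, i.e. 0x000002 is "after"
 * 0xfffffe across the 24-bit wrap.
 */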

__be32 rvt_compute_aeth(struct rvt_qp *qp);

void rvt_get_credit(struct rvt_qp *qp, u32 aeth);

u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);

/**
 * rvt_div_round_up_mtu - round up divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu round up divide
 */
static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
{
	return (len + qp->pmtu - 1) >> qp->log_pmtu;
}

/**
 * rvt_div_mtu - mtu based divide
 * @qp - the qp pair
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}
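
/*
 * Worked example (editor's addition): with pmtu = 4096 (log_pmtu = 12)
 * and len = 10000, rvt_div_round_up_mtu() gives (10000 + 4095) >> 12 = 3
 * full-or-partial packets, while rvt_div_mtu() gives 10000 >> 12 = 2
 * complete mtu-sized packets.
 */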

/**
 * rvt_timeout_to_jiffies - Convert a ULP timeout input into jiffies
 * @timeout - timeout input (0 - 31).
 *
 * Return a timeout value in jiffies.
 */
static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
{
	if (timeout > 31)
		timeout = 31;

	return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
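
/*
 * Worked example (editor's addition): this implements the IB rule
 * timeout = 4.096 usec * 2^t.  For t = 14 that is
 * 4.096 usec * 16384 ~= 67.1 msec, computed here as
 * usecs_to_jiffies(1 << 14) * 4096 / 1000.
 */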

/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: the rvt device info
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
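
/*
 * Illustrative usage sketch (editor's addition; the helper name is
 * hypothetical): looking up a QP and taking a reference so it can
 * outlive the RCU read-side critical section.
 */
static inline struct rvt_qp *example_lookup_and_hold(struct rvt_dev_info *rdi,
						     struct rvt_ibport *rvp,
						     u32 qpn)
{
	struct rvt_qp *qp;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, rvp, qpn);
	if (qp)
		rvt_get_qp(qp);		/* hold before dropping RCU */
	rcu_read_unlock();
	return qp;			/* caller must rvt_put_qp() */
}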

/**
 * rvt_mod_retry_timer - mod a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 * Modify a potentially already running retry timer
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}

static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	return rvt_mod_retry_timer_ext(qp, 0);
}

/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * This drops any references held by the swqe
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
}

/**
 * rvt_qp_swqe_incr - increment ring index
 * @qp: the qp
 * @val: the starting value
 *
 * Return: the new value wrapping as appropriate
 */
static inline u32
rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
{
	if (++val >= qp->s_size)
		val = 0;
	return val;
}

int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);

/**
 * rvt_recv_cq - add a new entry to completion queue
 *			by receive queue
 * @qp: receive queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper around rvt_cq_enter() for use by the receive
 * queue. If rvt_cq_enter() returns false, it means the cq is full
 * and the qp is put into error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_send_cq - add a new entry to completion queue
 *                        by send queue
 * @qp: send queue
 * @wc: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This is a wrapper around rvt_cq_enter() for use by the send
 * queue. If rvt_cq_enter() returns false, it means the cq is full
 * and the qp is put into error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}

/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @opcode - wc operation (driver dependent)
 * @status - completion status
 *
 * Update the s_last information, and then insert a send
 * completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 *
 * Return: new last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	rvt_qp_wqe_unreserve(qp, flags);
	rvt_put_qp_swqe(qp, wqe);

	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		(flags & IB_SEND_SIGNALED) ||
		status != IB_WC_SUCCESS);
	if (need_completion) {
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
		/* above fields required before writing s_last */
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/* see rvt_qp_is_avail() */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};
		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}

extern const int  ib_rvt_state_ops[];

struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last);
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);
void rvt_ruc_loopback(struct rvt_qp *qp);

/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp - the current QP
 *
 * This structure defines the current iterator
 * state for sequenced access to all QPs relative
 * to an rvt_dev_info.
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* private: backpointer */
	struct rvt_dev_info *rdi;
	/* private: callback routine */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* private: for arg to callback routine */
	u64 v;
	/* private: number of SMI,GSI QPs for device */
	int specials;
	/* private: current iterator index */
	int n;
};

/**
 * ib_cq_tail - Return tail index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get the tail
 * of the cq buffer.
 */
static inline u32 ib_cq_tail(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
	       ibcq_to_rvtcq(send_cq)->kqueue->tail;
}

/**
 * ib_cq_head - Return head index of cq buffer
 * @send_cq - The cq for send
 *
 * This is called in qp_iter_print to get the head
 * of the cq buffer.
 */
static inline u32 ib_cq_head(struct ib_cq *send_cq)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);

	return ibcq_to_rvtcq(send_cq)->ip ?
	       RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
	       ibcq_to_rvtcq(send_cq)->kqueue->head;
}

/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * This function should only be called if the rvt_mmap_info()
 * has not succeeded.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
}

/**
 * rvt_to_iport - Get the ibport pointer
 * @qp: the qp pointer
 *
 * This function returns the ibport pointer from the qp pointer.
 */
static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
{
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	return rdi->ports[qp->port_num - 1];
}

/**
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
 * @qp: the qp
 * @wqe: the request
 *
 * This function returns false when there are not enough credits for the given
 * request and true otherwise.
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		struct rvt_ibport *rvp = rvt_to_iport(qp);

		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		rvp->n_rc_crwaits++;
		return false;
	}
	return true;
}

struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v));
int rvt_qp_iter_next(struct rvt_qp_iter *iter);
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v));
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
#endif          /* DEF_RDMAVT_INCQP_H */