// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */

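/* Illustrative sketch of the per-RPC MR lifecycle implied by the
 * operations in this file. The actual call sites live elsewhere in the
 * rpcrdma transport; rq_xid below simply stands for the RPC's XID:
 *
 *	seg = frwr_map(r_xprt, seg, nsegs, writing, rq_xid, mr);
 *	frwr_send(r_xprt, req);		// FastReg WRs chained to the Send WR
 *	... wait for the RPC Reply ...
 *	frwr_reminv(rep, &req->rl_registered);	// release a remotely invalidated MR
 *	frwr_unmap_async(r_xprt, req);		// LOCAL_INV any remaining MRs
 */
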
/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_recycle(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_lock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_lock);

	frwr_release_mr(mr);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		rpcrdma_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->mr_xprt = r_xprt;
	mr->frwr.fr_mr = frmr;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}

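	/* Worked example (hypothetical numbers, for illustration only):
	 * if RPCRDMA_MAX_DATA_SEGS were 64 and re_max_fr_depth were 16,
	 * delta would start at 48 and the loop above would run three
	 * times, so depth = 7 + 3 * 2 = 13 WRs budgeted per RPC below.
	 */
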
	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

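	/* The next two statements fold the RPC's XID into the upper
	 * 32 bits of the MR's iova, likely to make it easy to match a
	 * registration with the RPC that owns it; bumping the low-order
	 * key byte gives each registration of this ib_mr a fresh rkey.
	 */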
	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, frwr);
	/* The MR will get recycled when the associated req is retransmitted */

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

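	/* For example, with two registered MRs A and B (in list order),
	 * the chain posted below is REG_MR(B) -> REG_MR(A) -> Send WR;
	 * send queue ordering means the registrations are processed
	 * before the Send that references them.
	 */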
	return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_reminv(mr);
			rpcrdma_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		frwr_mr_recycle(mr);
	else
		rpcrdma_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, frwr);
	__frwr_release_mr(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, frwr);
	__frwr_release_mr(wc, mr);
	complete(&frwr->fr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

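	/* For example, if MRs A and B were popped in that order, the
	 * chain built above is LOCAL_INV(A) -> LOCAL_INV(B); just below,
	 * B's completion handler is swapped for the variant that wakes
	 * this sleeping caller.
	 */
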
	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq:	completion queue
 * @wc:	WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	__frwr_release_mr(wc, mr);

	/* Ensure @rep is generated before __frwr_release_mr */
	smp_rmb();
	rpcrdma_complete_rqst(rep);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		frwr_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so wake here in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}