// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018 Oracle.  All rights reserved.
 *
 * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
 */

#include <rdma/rw.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);

/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain handles a single contiguous server-side buffer,
 * because scatterlist entries after the first have to start on
 * page alignment. xdr_buf iovecs cannot guarantee alignment.
 *
 * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
 * from a client may contain a unique R_key, so each WR chain moves
 * up to one segment at a time.
 *
 * The scatterlist makes this data structure over 4KB in size. To
 * make it less likely to fail, and to handle the allocation for
 * smaller I/O requests without disabling bottom-halves, these
 * contexts are created on demand, but cached and reused until the
 * controlling svcxprt_rdma is destroyed.
 */
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	unsigned int		rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

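/* Fetch an R/W context from the transport's free list, or allocate
 * a fresh one if the list is empty. The scatterlist is then sized to
 * hold @sges entries, chaining additional entries onto rw_first_sgl
 * when SG_CHUNK_SIZE is not enough. Returns NULL if either step fails.
 */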
static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
			       GFP_KERNEL);
		if (!ctxt)
			goto out_noctx;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl,
				   SG_CHUNK_SIZE))
		goto out_free;
	return ctxt;

out_free:
	kfree(ctxt);
out_noctx:
	trace_svcrdma_no_rwctx_err(rdma, sges);
	return NULL;
}

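/* Release the context's scatterlist and return it to the transport's
 * free list for reuse.
 */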
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}

/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 *
 */
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_rw_ctx_init - Prepare an R/W context for I/O
 * @rdma: controlling transport instance
 * @ctxt: R/W context to prepare
 * @offset: RDMA offset
 * @handle: RDMA tag/handle
 * @direction: I/O direction
 *
 * On success, returns the number of WQEs that will be needed
 * on the Send Queue; otherwise a negative errno.
 */
static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
				struct svc_rdma_rw_ctxt *ctxt,
				u64 offset, u32 handle,
				enum dma_data_direction direction)
{
	int ret;

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, handle, direction);
	if (unlikely(ret < 0)) {
		svc_rdma_put_rw_ctxt(rdma, ctxt);
		trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
	}
	return ret;
}

/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 *
 * These are small, acquired with a single allocator call, and
 * no more than one is needed per chunk. They are allocated on
 * demand, and not cached.
 */
struct svc_rdma_chunk_ctxt {
	struct rpc_rdma_cid	cc_cid;
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};

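/* Tag a WR chain with the Send CQ's resource ID and a per-transport
 * completion ID so trace points can correlate posts with completions.
 */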
static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
				 struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

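/* Prepare a chunk context for use: assign its completion ID, and
 * start with an empty list of R/W contexts and a zero SQE count.
 */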
static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
	cc->cc_rdma = rdma;

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

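/* Tear down a chunk context: destroy each attached rdma_rw context,
 * unmapping its DMA mappings in direction @dir, then return each R/W
 * context to the transport's free list.
 */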
static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
}

/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};

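/* Allocate and initialize state for writing one Write or Reply chunk.
 * @chunk points to the chunk in the Receive buffer; per RFC 8166, the
 * words following the chunk's list discriminator carry the segment
 * count and then the segment array, both cached here so the segments
 * can be walked as the payload is written.
 */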
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	trace_svcrdma_wc_write(wc, &cc->cc_cid);

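	/* Return this chain's Send Queue credits and wake up any
	 * sender waiting for SQ space.
	 */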
	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);

	svc_rdma_write_info_free(info);
}

/* State for pulling a Read chunk.
 */
struct svc_rdma_read_info {
	struct svc_rdma_recv_ctxt	*ri_readctxt;
	unsigned int			ri_position;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_chunklen;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}

/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 */
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	trace_svcrdma_wc_read(wc, &cc->cc_cid);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->rc_list,
			      &rdma->sc_read_complete_q);
		/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		spin_unlock(&rdma->sc_rq_dto_lock);

		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}

/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr;
	const struct ib_send_wr *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

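	/* Reserve Send Queue space for the whole chain before posting.
	 * If the SQ is too full, return the credits and sleep until
	 * enough completions have freed up space.
	 */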
	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		trace_svcrdma_sq_full(rdma);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma);
	} while (1);

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}

/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}

/* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 */
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}

/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
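	/* Each pass consumes up to one RDMA segment's worth of the
	 * remaining payload: the constructor builds an SGL for the
	 * source bytes, and an rdma_rw context maps them for an RDMA
	 * Write to the segment's R_key and offset. A partially-written
	 * segment carries over via wi_seg_off.
	 */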
	do {
		unsigned int write_len;
		u32 handle, length;
		u64 offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		xdr_decode_rdma_segment(seg, &handle, &length, &offset);
		offset += info->wi_seg_off;

		write_len = min(remaining, length - info->wi_seg_off);
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			return -ENOMEM;

		constructor(info, write_len, ctxt);
		ret = svc_rdma_rw_ctx_init(rdma, ctxt, offset, handle,
					   DMA_TO_DEVICE);
		if (ret < 0)
			return -EIO;

		trace_svcrdma_send_wseg(handle, write_len, offset);

		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
				     info->wi_nsegs);
	return -E2BIG;
}

/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf
 * containing the Reply.
 */
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}

/* Send an xdr_buf's page list by itself. A Write chunk is just
 * the page list. A Reply chunk is @xdr's head, page list, and
 * tail. This function is shared between the two types of chunk.
 */
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr,
				      unsigned int offset,
				      unsigned long length)
{
	info->wi_xdr = xdr;
	info->wi_next_off = offset - xdr->head[0].iov_len;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     length);
}

/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 * @offset: payload's byte offset in @xdr
 * @length: size of payload, in bytes
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr,
			      unsigned int offset, unsigned long length)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!length)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr, offset, length);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_send_write_chunk(xdr->page_len);
	return length;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rctxt: Write and Reply chunks from client
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
			      const struct svc_rdma_recv_ctxt *rctxt,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rctxt->rc_reply_chunk);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!rctxt->rc_write_list && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr,
						 xdr->head[0].iov_len,
						 xdr->page_len);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_send_reply_chunk(consumed);
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}

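/* Build an rdma_rw context for one RDMA segment of a Read chunk: map
 * enough of @rqstp's Receive pages to land @len bytes of payload, then
 * set up an RDMA Read from the client memory region named by @rkey and
 * @offset. Returns 0 on success, or a negative errno.
 */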
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		return -ENOMEM;
	ctxt->rw_nents = sge_no;

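	/* Fill the SGL one Receive page at a time. Each page is also
	 * recorded in head->rc_arg.pages so that it can be retained
	 * until the RDMA Read I/O on it is complete.
	 */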
	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->rc_arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->rc_page_count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
				   DMA_FROM_DEVICE);
	if (ret < 0)
		return -EIO;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_overrun:
	trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
	return -EINVAL;
}

/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	int ret;

	ret = -EINVAL;
	info->ri_chunklen = 0;
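	/* The chunk's segment list ends at the first xdr_zero
	 * discriminator. A segment whose Position differs from
	 * ri_position belongs to another chunk, so stop there too.
	 */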
	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
		u32 handle, length;
		u64 offset;

		p = xdr_decode_rdma_segment(p, &handle, &length, &offset);
		ret = svc_rdma_build_read_segment(info, rqstp, handle, length,
						  offset);
		if (ret < 0)
			break;

		trace_svcrdma_send_rseg(handle, length, offset);
		info->ri_chunklen += length;
	}

	return ret;
}

/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->rc_arg.pages.
 *
 * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);

	head->rc_hdr_count = 0;

	/* Split the Receive buffer between the head and tail
	 * buffers at the Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	head->rc_arg.tail[0].iov_base =
		head->rc_arg.head[0].iov_base + info->ri_position;
	head->rc_arg.tail[0].iov_len =
		head->rc_arg.head[0].iov_len - info->ri_position;
	head->rc_arg.head[0].iov_len = info->ri_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

	head->rc_arg.page_len = info->ri_chunklen;
	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

out:
	return ret;
}

/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->rc_arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content (IOW, we assume
 *	  a PZRC is never sent in an RDMA_MSG message, though it's
 *	  allowed by spec).
 */
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_send_pzr(info->ri_chunklen);

	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

	head->rc_hdr_count = 1;
	head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
	head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
					     info->ri_chunklen);

	head->rc_arg.page_len = info->ri_chunklen -
				head->rc_arg.head[0].iov_len;

out:
	return ret;
}

/* Pages under I/O have been copied to head->rc_pages. Ensure they
 * are not released by svc_xprt_release() until the I/O is complete.
 *
 * This has to be done after all Read WRs are constructed to properly
 * handle a page that is part of I/O on behalf of two different RDMA
 * segments.
 *
 * Do this only if I/O has been posted. Otherwise, we do indeed want
 * svc_xprt_release() to clean things up properly.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   const unsigned int start,
				   const unsigned int num_pages)
{
	unsigned int i;

	for (i = start; i < num_pages + start; i++)
		rqstp->rq_pages[i] = NULL;
}

/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Assumptions:
 * - All Read segments in @p have the same Position value.
 */
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_recv_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	int ret;

	/* The request (with page list) is constructed in
	 * head->rc_arg. Pages involved with RDMA Read I/O are
	 * transferred there.
	 */
	head->rc_arg.head[0] = rqstp->rq_arg.head[0];
	head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
	head->rc_arg.pages = head->rc_pages;
	head->rc_arg.page_base = 0;
	head->rc_arg.page_len = 0;
	head->rc_arg.len = rqstp->rq_arg.len;
	head->rc_arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;
	info->ri_pageno = 0;
	info->ri_pageoff = 0;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
	if (ret < 0)
		goto out_err;
	svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
	return 0;

out_err:
	svc_rdma_read_info_free(info);
	return ret;
}