// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
				       struct rpcrdma_sendctx *sc);
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rdma_cm_id *id = ep->re_id;

	/* Flush Receives, then wait for deferred Reply work
	 * to complete.
	 */
	ib_drain_rq(id->qp);

	/* Deferred Reply processing might have scheduled
	 * local invalidations.
	 */
	ib_drain_sq(id->qp);

	rpcrdma_ep_put(ep);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context. The QP is always destroyed before the ID, so the ID will be
 * reliably available when this handler is invoked.
 */
static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	trace_xprtrdma_qp_event(ep, event);
}

/* Ensure xprt_force_disconnect() is invoked exactly once when a
 * connection is closed or lost. (The important thing is it needs
 * to be invoked "at least" once).
 */
static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
{
	if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
		xprt_force_disconnect(ep->re_xprt);
}

/**
 * rpcrdma_flush_disconnect - Disconnect on flushed completion
 * @r_xprt: transport to disconnect
 * @wc: work completion entry
 *
 * Must be called in process context.
 */
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS)
		rpcrdma_force_disconnect(r_xprt->rx_ep);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue
 * @wc: WCE for a completed Send WR
 *
 */
static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
	struct rpcrdma_xprt *r_xprt = cq->cq_context;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	rpcrdma_sendctx_put_locked(r_xprt, sc);
	rpcrdma_flush_disconnect(r_xprt, wc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue
 * @wc: WCE for a completed Receive WR
 *
 */
static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);
	struct rpcrdma_xprt *r_xprt = cq->cq_context;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	--r_xprt->rx_ep->re_receive_count;
	if (wc->status != IB_WC_SUCCESS)
		goto out_flushed;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

	rpcrdma_reply_handler(rep);
	return;

out_flushed:
	rpcrdma_flush_disconnect(r_xprt, wc);
	rpcrdma_rep_destroy(rep);
}

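/* Absorb connection parameters negotiated via RDMA-CM private data.
 * If the peer sent a valid RPC-over-RDMA private message, adopt its
 * advertised inline thresholds when they are smaller than the ones
 * currently configured; otherwise the RPC-over-RDMA Version One
 * defaults set below remain in effect.
 */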
static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
				      struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	ep->re_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		ep->re_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < ep->re_inline_recv)
		ep->re_inline_recv = rsize;
	if (wsize < ep->re_inline_send)
		ep->re_inline_send = wsize;

	rpcrdma_set_max_header_sizes(ep);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
	struct rpcrdma_ep *ep = id->context;

	might_sleep();

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ep->re_async_rc = 0;
		complete(&ep->re_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ep->re_async_rc = -EPROTO;
		complete(&ep->re_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ep->re_async_rc = -ENETUNREACH;
		complete(&ep->re_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		pr_info("rpcrdma: removing device %s for %pISpc\n",
			ep->re_id->device->name, sap);
		fallthrough;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		ep->re_connect_status = -ENODEV;
		goto disconnected;
	case RDMA_CM_EVENT_ESTABLISHED:
		rpcrdma_ep_get(ep);
		ep->re_connect_status = 1;
		rpcrdma_update_cm_private(ep, &event->param.conn);
		trace_xprtrdma_inline_thresh(ep);
		wake_up_all(&ep->re_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->re_connect_status = -ENOTCONN;
		goto wake_connect_worker;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->re_connect_status = -ENETUNREACH;
		goto wake_connect_worker;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
			sap, rdma_reject_msg(id, event->status));
		ep->re_connect_status = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->re_connect_status = -ENOTCONN;
wake_connect_worker:
		wake_up_all(&ep->re_connect_wait);
		return 0;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->re_connect_status = -ECONNABORTED;
disconnected:
		rpcrdma_force_disconnect(ep);
		return rpcrdma_ep_put(ep);
	default:
		break;
	}

	dprintk("RPC:       %s: %pISpc on %s/frwr: %s\n", __func__, sap,
		ep->re_id->device->name, rdma_event_msg(event->event));
	return 0;
}

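/* Create a new rdma_cm_id and synchronously resolve the server's
 * address and a route to it. Each asynchronous step is bounded by
 * RDMA_RESOLVE_TIMEOUT. Returns the ready-to-connect ID, or an
 * ERR_PTR if resolution failed or timed out.
 */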
static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt,
					    struct rpcrdma_ep *ep)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ep->re_done);

	id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	ep->re_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
	if (rc < 0)
		goto out;

	rc = ep->re_async_rc;
	if (rc)
		goto out;

	ep->re_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc)
		goto out;
	rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
	if (rc < 0)
		goto out;
	rc = ep->re_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

static void rpcrdma_ep_destroy(struct kref *kref)
{
	struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);

	if (ep->re_id->qp) {
		rdma_destroy_qp(ep->re_id);
		ep->re_id->qp = NULL;
	}

	if (ep->re_attr.recv_cq)
		ib_free_cq(ep->re_attr.recv_cq);
	ep->re_attr.recv_cq = NULL;
	if (ep->re_attr.send_cq)
		ib_free_cq(ep->re_attr.send_cq);
	ep->re_attr.send_cq = NULL;

	if (ep->re_pd)
		ib_dealloc_pd(ep->re_pd);
	ep->re_pd = NULL;

	kfree(ep);
	module_put(THIS_MODULE);
}

static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
{
	kref_get(&ep->re_kref);
}

/* Returns:
 *     %0 if @ep still has a positive kref count, or
 *     %1 if @ep was destroyed successfully.
 */
static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
{
	return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
}

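/* Set up a fresh transport endpoint: create an rdma_cm_id, query
 * the device's FRWR limits, allocate send and receive completion
 * queues and a protection domain, then create the QP. On success,
 * the new endpoint is recorded in r_xprt->rx_ep with one kref held.
 */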
static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_connect_private *pmsg;
	struct ib_device *device;
	struct rdma_cm_id *id;
	struct rpcrdma_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_NOFS);
	if (!ep)
		return -ENOTCONN;
	ep->re_xprt = &r_xprt->rx_xprt;
	kref_init(&ep->re_kref);

	id = rpcrdma_create_id(r_xprt, ep);
	if (IS_ERR(id)) {
		kfree(ep);
		return PTR_ERR(id);
	}
	__module_get(THIS_MODULE);
	device = id->device;
	ep->re_id = id;

	ep->re_max_requests = r_xprt->rx_xprt.max_reqs;
	ep->re_inline_send = xprt_rdma_max_inline_write;
	ep->re_inline_recv = xprt_rdma_max_inline_read;
	rc = frwr_query_device(ep, device);
	if (rc)
		goto out_destroy;

	r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests);

	ep->re_attr.event_handler = rpcrdma_qp_event_handler;
	ep->re_attr.qp_context = ep;
	ep->re_attr.srq = NULL;
	ep->re_attr.cap.max_inline_data = 0;
	ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->re_attr.qp_type = IB_QPT_RC;
	ep->re_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->re_attr.cap.max_send_wr,
		ep->re_attr.cap.max_recv_wr,
		ep->re_attr.cap.max_send_sge,
		ep->re_attr.cap.max_recv_sge);

	ep->re_send_batch = ep->re_max_requests >> 3;
	ep->re_send_count = ep->re_send_batch;
	init_waitqueue_head(&ep->re_connect_wait);

	ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,
					      ep->re_attr.cap.max_send_wr,
					      IB_POLL_WORKQUEUE);
	if (IS_ERR(ep->re_attr.send_cq)) {
		rc = PTR_ERR(ep->re_attr.send_cq);
		ep->re_attr.send_cq = NULL;
		goto out_destroy;
	}

	ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt,
					      ep->re_attr.cap.max_recv_wr,
					      IB_POLL_WORKQUEUE);
	if (IS_ERR(ep->re_attr.recv_cq)) {
		rc = PTR_ERR(ep->re_attr.recv_cq);
		ep->re_attr.recv_cq = NULL;
		goto out_destroy;
	}
	ep->re_receive_count = 0;

	/* Initialize cma parameters */
	memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg = &ep->re_cm_private;
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv);
	ep->re_remote_cma.private_data = pmsg;
	ep->re_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->re_remote_cma.initiator_depth = 0;
	ep->re_remote_cma.responder_resources =
		min_t(int, U8_MAX, device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->re_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->re_remote_cma.flow_control = 0;
	ep->re_remote_cma.rnr_retry_count = 0;

	ep->re_pd = ib_alloc_pd(device, 0);
	if (IS_ERR(ep->re_pd)) {
		rc = PTR_ERR(ep->re_pd);
		ep->re_pd = NULL;
		goto out_destroy;
	}

	rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr);
	if (rc)
		goto out_destroy;

	r_xprt->rx_ep = ep;
	return 0;

out_destroy:
	rpcrdma_ep_put(ep);
	rdma_destroy_id(id);
	return rc;
}

/**
 * rpcrdma_xprt_connect - Connect an unconnected transport
 * @r_xprt: controlling transport instance
 *
 * Returns 0 on success or a negative errno.
 */
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_ep *ep;
	int rc;

	rc = rpcrdma_ep_create(r_xprt);
	if (rc)
		return rc;
	ep = r_xprt->rx_ep;

	xprt_clear_connected(xprt);
	rpcrdma_reset_cwnd(r_xprt);

	/* Bump the ep's reference count while there are
	 * outstanding Receives.
	 */
	rpcrdma_ep_get(ep);
	rpcrdma_post_recvs(r_xprt, 1, true);

	rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
	if (rc)
		goto out;

	if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
		xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	wait_event_interruptible(ep->re_connect_wait,
				 ep->re_connect_status != 0);
	if (ep->re_connect_status <= 0) {
		rc = ep->re_connect_status;
		goto out;
	}

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc) {
		rc = -ENOTCONN;
		goto out;
	}

	rc = rpcrdma_reqs_setup(r_xprt);
	if (rc) {
		rc = -ENOTCONN;
		goto out;
	}
	rpcrdma_mrs_create(r_xprt);

out:
	trace_xprtrdma_connect(r_xprt, rc);
	return rc;
}

/**
 * rpcrdma_xprt_disconnect - Disconnect underlying transport
 * @r_xprt: controlling transport instance
 *
 * Caller serializes. Either the transport send lock is held,
 * or we're being called to destroy the transport.
 *
 * On return, @r_xprt is completely divested of all hardware
 * resources and prepared for the next ->connect operation.
 */
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rdma_cm_id *id;
	int rc;

	if (!ep)
		return;

	id = ep->re_id;
	rc = rdma_disconnect(id);
	trace_xprtrdma_disconnect(r_xprt, rc);

	rpcrdma_xprt_drain(r_xprt);
	rpcrdma_reps_unmap(r_xprt);
	rpcrdma_reqs_reset(r_xprt);
	rpcrdma_mrs_destroy(r_xprt);
	rpcrdma_sendctxs_destroy(r_xprt);

	if (rpcrdma_ep_put(ep))
		rdma_destroy_id(id);

	r_xprt->rx_ep = NULL;
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
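/* A sketch of the queue invariants: the array holds rb_sc_last + 1
 * contexts; rb_sc_head indexes the entry most recently handed to a
 * consumer, and rb_sc_tail the entry most recently retired by the
 * producer. The queue is exhausted when advancing rb_sc_head would
 * make it equal to rb_sc_tail (see rpcrdma_sendctx_get_locked below).
 */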

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	unsigned long i;

	if (!buf->rb_sc_ctxs)
		return;
	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
	buf->rb_sc_ctxs = NULL;
}

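/* Allocate a single send context, sized to carry as many SGEs as
 * the device accepts in one Send WR.
 */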
static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

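/* Allocate the circular queue of send contexts that backs
 * rpcrdma_sendctx_get_locked() and rpcrdma_sendctx_put_locked().
 */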
static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(r_xprt->rx_ep);
		if (!sc)
			return -ENOMEM;

		buf->rb_sc_ctxs[i] = sc;
	}

	buf->rb_sc_head = 0;
	buf->rb_sc_tail = 0;
	return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @r_xprt: controlling transport instance
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
				       struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	xprt_write_space(&r_xprt->rx_xprt);
}

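/* Allocate up to ep->re_max_rdma_segs fresh MRs and add them to the
 * transport's free list and all-MRs list. Called at connect time,
 * and again from the refresh worker when the free list runs dry.
 */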
static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int count;

	for (count = 0; count < ep->re_max_rdma_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_NOFS);
		if (!mr)
			break;

		rc = frwr_mr_init(r_xprt, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		spin_lock(&buf->rb_lock);
		rpcrdma_mr_push(mr, &buf->rb_mrs);
		list_add(&mr->mr_all, &buf->rb_all_mrs);
		spin_unlock(&buf->rb_lock);
	}

	r_xprt->rx_stats.mrs_allocated += count;
	trace_xprtrdma_createmrs(r_xprt, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
	xprt_write_space(&r_xprt->rx_xprt);
}

/**
 * rpcrdma_mrs_refresh - Wake the MR refresh worker
 * @r_xprt: controlling transport instance
 *
 */
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;

	/* If there is no underlying connection, it's no use
	 * to wake the refresh worker.
	 */
	if (ep->re_connect_status == 1) {
		/* The work is scheduled on a WQ_MEM_RECLAIM
		 * workqueue in order to prevent MR allocation
		 * from recursing into NFS during direct reclaim.
		 */
		queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
	}
}

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
				       gfp_t flags)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), flags);
	if (req == NULL)
		goto out1;

	req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
	if (!req->rl_sendbuf)
		goto out2;

	req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
	if (!req->rl_recvbuf)
		goto out3;

	INIT_LIST_HEAD(&req->rl_free_mrs);
	INIT_LIST_HEAD(&req->rl_registered);
	spin_lock(&buffer->rb_lock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_lock);
	return req;

out3:
	kfree(req->rl_sendbuf);
out2:
	kfree(req);
out1:
	return NULL;
}

/**
 * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req object to set up
 *
 * Returns zero on success, and a negative errno on failure.
 */
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_regbuf *rb;
	size_t maxhdrsize;

	/* Compute maximum header buffer size in bytes */
	maxhdrsize = rpcrdma_fixed_maxsz + 3 +
		     r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
	maxhdrsize *= sizeof(__be32);
	rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (!rb)
		goto out;

	if (!__rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_free;

	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
	return 0;

out_free:
	rpcrdma_regbuf_free(rb);
out:
	return -ENOMEM;
}

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock. Eg. the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	int rc;

	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rc = rpcrdma_req_setup(r_xprt, req);
		if (rc)
			return rc;
	}
	return 0;
}

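/* Release the per-connection resources attached to a req so it can
 * be reused safely after a reconnect.
 */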
static void rpcrdma_req_reset(struct rpcrdma_req *req)
{
	/* Credits are valid for only one connection */
	req->rl_slot.rq_cong = 0;

	rpcrdma_regbuf_free(req->rl_rdmabuf);
	req->rl_rdmabuf = NULL;

	rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
	rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);

	frwr_reset(req);
}

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock. Eg. the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	list_for_each_entry(req, &buf->rb_allreqs, rl_all)
		rpcrdma_req_reset(req);
}

/* No locking needed here. This function is called only by the
 * Receive completion handler.
 */
static noinline
struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
				       bool temp)
{
	struct rpcrdma_rep *rep;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (!rep->rr_rdmabuf)
		goto out_free;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
		goto out_free_regbuf;

	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
		     rdmab_length(rep->rr_rdmabuf));
	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;
	list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
	return rep;

out_free_regbuf:
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
out_free:
	kfree(rep);
out:
	return NULL;
}

/* No locking needed here. This function is invoked only by the
 * Receive completion handler, or during transport shutdown.
 */
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
	list_del(&rep->rr_all);
	rpcrdma_regbuf_free(rep->rr_rdmabuf);
	kfree(rep);
}

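/* Remove one rep from the free list, or return NULL if the list is
 * empty. Callers must serialize with each other: llist_del_first
 * tolerates only a single concurrent consumer.
 */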
static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
	struct llist_node *node;

	/* Calls to llist_del_first are required to be serialized */
	node = llist_del_first(&buf->rb_free_reps);
	if (!node)
		return NULL;
	return llist_entry(node, struct rpcrdma_rep, rr_node);
}

static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
			    struct rpcrdma_rep *rep)
{
	llist_add(&rep->rr_node, &buf->rb_free_reps);
}

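/* DMA unmap all cached reps and mark them temporary, so that the
 * transport releases rather than recycles them as they complete or
 * are flushed.
 */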
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;

	list_for_each_entry(rep, &buf->rb_all_reps, rr_all) {
		rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
		rep->rr_temp = true;
	}
}

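/* Destroy all reps currently sitting on the free list. Reps still
 * posted to the Receive Queue are destroyed by the completion
 * handler as they flush.
 */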
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	while ((rep = rpcrdma_rep_get_locked(buf)) != NULL)
		rpcrdma_rep_destroy(rep);
}

/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all_mrs);
	INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	INIT_LIST_HEAD(&buf->rb_all_reps);

	rc = -ENOMEM;
	for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
					 GFP_KERNEL);
		if (!req)
			goto out;
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	init_llist_head(&buf->rb_free_reps);

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * Relies on caller holding the transport send lock to protect
 * removing req->rl_all from buf->rb_allreqs safely.
 */
rpcrdma_req_destroy(struct rpcrdma_req * req)1091*4882a593Smuzhiyun void rpcrdma_req_destroy(struct rpcrdma_req *req)
1092*4882a593Smuzhiyun {
1093*4882a593Smuzhiyun struct rpcrdma_mr *mr;
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun list_del(&req->rl_all);
1096*4882a593Smuzhiyun
1097*4882a593Smuzhiyun while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
1098*4882a593Smuzhiyun struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;
1099*4882a593Smuzhiyun
1100*4882a593Smuzhiyun spin_lock(&buf->rb_lock);
1101*4882a593Smuzhiyun list_del(&mr->mr_all);
1102*4882a593Smuzhiyun spin_unlock(&buf->rb_lock);
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun frwr_release_mr(mr);
1105*4882a593Smuzhiyun }
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun rpcrdma_regbuf_free(req->rl_recvbuf);
1108*4882a593Smuzhiyun rpcrdma_regbuf_free(req->rl_sendbuf);
1109*4882a593Smuzhiyun rpcrdma_regbuf_free(req->rl_rdmabuf);
1110*4882a593Smuzhiyun kfree(req);
1111*4882a593Smuzhiyun }
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun /**
1114*4882a593Smuzhiyun * rpcrdma_mrs_destroy - Release all of a transport's MRs
1115*4882a593Smuzhiyun * @r_xprt: controlling transport instance
1116*4882a593Smuzhiyun *
1117*4882a593Smuzhiyun * Relies on caller holding the transport send lock to protect
1118*4882a593Smuzhiyun * removing mr->mr_list from req->rl_free_mrs safely.
1119*4882a593Smuzhiyun */
rpcrdma_mrs_destroy(struct rpcrdma_xprt * r_xprt)1120*4882a593Smuzhiyun static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
1121*4882a593Smuzhiyun {
1122*4882a593Smuzhiyun struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
1123*4882a593Smuzhiyun struct rpcrdma_mr *mr;
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun cancel_work_sync(&buf->rb_refresh_worker);
1126*4882a593Smuzhiyun
1127*4882a593Smuzhiyun spin_lock(&buf->rb_lock);
1128*4882a593Smuzhiyun while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
1129*4882a593Smuzhiyun struct rpcrdma_mr,
1130*4882a593Smuzhiyun mr_all)) != NULL) {
1131*4882a593Smuzhiyun list_del(&mr->mr_list);
1132*4882a593Smuzhiyun list_del(&mr->mr_all);
1133*4882a593Smuzhiyun spin_unlock(&buf->rb_lock);
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun frwr_release_mr(mr);
1136*4882a593Smuzhiyun
1137*4882a593Smuzhiyun spin_lock(&buf->rb_lock);
1138*4882a593Smuzhiyun }
1139*4882a593Smuzhiyun spin_unlock(&buf->rb_lock);
1140*4882a593Smuzhiyun }

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain:
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	rpcrdma_reps_destroy(buf);

	while (!list_empty(&buf->rb_send_bufs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
		list_del(&req->rl_list);
		rpcrdma_req_destroy(req);
	}
}
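
/* Teardown-ordering sketch (illustrative, not the verbatim close
 * path): per the kernel-doc above, the QP must be drained before the
 * buffers can be torn down, roughly:
 *
 *	rpcrdma_xprt_drain(r_xprt);		(completions quiesced)
 *	rpcrdma_mrs_destroy(r_xprt);
 *	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
 */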

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_lock);
	mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_lock);
	return mr;
}

/**
 * rpcrdma_mr_put - DMA unmap an MR and release it
 * @mr: MR to release
 *
 */
void rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}
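
/* Lifecycle sketch (hedged): an MR is popped from the transport-wide
 * rb_mrs pool at marshaling time and, once its registration has been
 * invalidated, returned to the owning req's private rl_free_mrs list:
 *
 *	mr = rpcrdma_mr_get(r_xprt);
 *	if (!mr)
 *		... defer; the refresh worker replenishes the pool ...
 *	... frwr_map() registers a chunk with this MR ...
 *	rpcrdma_mr_put(mr);	(DMA-unmaps, pushes to mr_req's list)
 */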

/**
 * rpcrdma_reply_put - Put reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	if (req->rl_reply) {
		rpcrdma_rep_put(buffers, req->rl_reply);
		req->rl_reply = NULL;
	}
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	rpcrdma_reply_put(buffers, req);

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	spin_unlock(&buffers->rb_lock);
}
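
/* Request-cycle sketch (hedged; the real callers live elsewhere in
 * xprtrdma): each outgoing RPC borrows one req for its lifetime, and
 * rpcrdma_buffer_put() also recycles any rep still attached to it:
 *
 *	req = rpcrdma_buffer_get(buffers);
 *	if (!req)
 *		... back off, no send slots available ...
 *	... marshal, post Send, wait for the Reply handler ...
 *	rpcrdma_buffer_put(buffers, req);
 */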

/**
 * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
 * @rep: rep to release
 *
 * Used after error conditions.
 */
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
}

/* Returns a pointer to an rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb), flags);
	if (!rb)
		return NULL;
	rb->rg_data = kmalloc(size, flags);
	if (!rb->rg_data) {
		kfree(rb);
		return NULL;
	}

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;
	return rb;
}

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
	void *buf;

	buf = kmalloc(size, flags);
	if (!buf)
		return false;

	rpcrdma_regbuf_dma_unmap(rb);
	kfree(rb->rg_data);

	rb->rg_data = buf;
	rb->rg_iov.length = size;
	return true;
}
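
/* Failure-mode note (sketch): because @rb is untouched on failure, a
 * caller that wants a larger buffer can keep using the old one when
 * memory is tight. On success the regbuf comes back *unmapped* (see
 * rpcrdma_regbuf_dma_unmap above), so it must be DMA-mapped again
 * before the next post. A hedged caller pattern:
 *
 *	if (size > rdmab_length(rb) &&
 *	    !rpcrdma_regbuf_realloc(rb, size, GFP_KERNEL))
 *		return -ENOMEM;		(old buffer still usable)
 */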

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = r_xprt->rx_ep->re_id->device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
					    rdmab_length(rb), rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
		trace_xprtrdma_dma_maperr(rdmab_addr(rb));
		return false;
	}

	rb->rg_device = device;
	rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey;
	return true;
}
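
/* The leading underscores suggest this is the slow path behind an
 * inline wrapper that first checks whether @rb is already mapped. A
 * sketch of such a wrapper (assumed from the naming convention and
 * rpcrdma_regbuf_is_mapped() below, not quoted from the header):
 *
 *	static inline bool rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
 *						  struct rpcrdma_regbuf *rb)
 *	{
 *		if (likely(rpcrdma_regbuf_is_mapped(rb)))
 *			return true;
 *		return __rpcrdma_regbuf_dma_map(r_xprt, rb);
 *	}
 */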

static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
			    rb->rg_direction);
	rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
	rpcrdma_regbuf_dma_unmap(rb);
	if (rb)
		kfree(rb->rg_data);
	kfree(rb);
}

/**
 * rpcrdma_post_sends - Post WRs to a transport's Send Queue
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
 */
int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_wr;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	int rc;

	if (!ep->re_send_count || kref_read(&req->rl_kref) > 1) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->re_send_count = ep->re_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->re_send_count;
	}

	trace_xprtrdma_post_send(req);
	rc = frwr_send(r_xprt, req);
	if (rc)
		return -ENOTCONN;
	return 0;
}
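
/* Completion-batching sketch: most Sends are posted unsignaled to cut
 * interrupt load; only every (re_send_batch + 1)th post is signaled.
 * With re_send_batch == 16 and the counter starting full:
 *
 *	posts 1..16: counter 16 -> 0, all unsignaled
 *	post    17:  counter == 0, IB_SEND_SIGNALED, counter reset
 *
 * kref_read(&req->rl_kref) > 1 forces an early signal: another context
 * still holds a reference to this req and must see its completion.
 */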

/**
 * rpcrdma_post_recvs - Refill the Receive Queue
 * @r_xprt: controlling transport instance
 * @needed: current credit grant
 * @temp: mark Receive buffers to be deleted after one use
 *
 */
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_recv_wr *wr, *bad_wr;
	struct rpcrdma_rep *rep;
	int count, rc;

	rc = 0;
	count = 0;

	if (likely(ep->re_receive_count > needed))
		goto out;
	needed -= ep->re_receive_count;
	if (!temp)
		needed += RPCRDMA_MAX_RECV_BATCH;

	/* fast path: all needed reps can be found on the free list */
	wr = NULL;
	while (needed) {
		rep = rpcrdma_rep_get_locked(buf);
		if (rep && rep->rr_temp) {
			rpcrdma_rep_destroy(rep);
			continue;
		}
		if (!rep)
			rep = rpcrdma_rep_create(r_xprt, temp);
		if (!rep)
			break;

		trace_xprtrdma_post_recv(rep);
		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		--needed;
		++count;
	}
	if (!wr)
		goto out;

	rc = ib_post_recv(ep->re_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
	if (rc) {
		for (wr = bad_wr; wr;) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			wr = wr->next;
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	ep->re_receive_count += count;
	return;
}
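
/* Refill arithmetic sketch: @needed is the peer's current credit
 * grant. With 3 credits granted, one Receive already posted, and
 * @temp false:
 *
 *	needed = 3 - 1 + RPCRDMA_MAX_RECV_BATCH
 *
 * The loop chains that many Receive WRs through rr_recv_wr.next
 * (recycling free reps before creating new ones) so that a single
 * ib_post_recv() call posts the whole batch.
 */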