/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
https://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#define BC_MAX_SLOTS	64U
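
/*
 * Report the maximum number of backchannel slots that this transport
 * will ever preallocate.
 */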
unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
{
	return BC_MAX_SLOTS;
}

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < xprt->bc_alloc_max;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

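/*
 * Reset a preallocated xdr_buf so that it once again describes a
 * single empty page, ready for reuse by the next callback request.
 */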
static void xprt_bc_reinit_xdr_buf(struct xdr_buf *buf)
{
	buf->head[0].iov_len = PAGE_SIZE;
	buf->tail[0].iov_len = 0;
	buf->pages = NULL;
	buf->page_len = 0;
	buf->flags = 0;
	buf->len = 0;
	buf->buflen = PAGE_SIZE;
}

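/*
 * Back an xdr_buf with a single freshly allocated page.
 */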
static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;
	/* Preallocate one XDR receive buffer */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	xdr_buf_init(buf, page_address(page), PAGE_SIZE);
	return 0;
}

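/*
 * Allocate a single backchannel rpc_rqst along with its send and
 * receive XDR buffers.
 */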
static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceid is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceids.  The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	if (!xprt->ops->bc_setup)
		return 0;
	return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

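/*
 * Generic implementation of the ->bc_setup transport method: build up
 * to min_reqs preallocated requests on a temporary list, then splice
 * them onto the transport's backchannel preallocation list.
 */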
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	if (min_reqs > BC_MAX_SLOTS)
		min_reqs = BC_MAX_SLOTS;

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt->bc_alloc_count += min_reqs;
	xprt->bc_alloc_max += min_reqs;
	atomic_add(min_reqs, &xprt->bc_slot_count);
	spin_unlock(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	if (xprt->ops->bc_destroy)
		xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

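/*
 * Generic implementation of the ->bc_destroy transport method: free up
 * to max_reqs preallocated requests and shrink the allocation limits
 * accordingly.
 */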
void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		xprt->bc_alloc_count--;
		atomic_dec(&xprt->bc_slot_count);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC: backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}

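/*
 * Take a free preallocated request off bc_pa_list, or add the spare
 * "new" request supplied by the caller when the pool is empty and the
 * slot limit has not yet been reached.  Called with bc_pa_lock held.
 */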
static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
		struct rpc_rqst *new)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	if (list_empty(&xprt->bc_pa_list)) {
		if (!new)
			goto not_found;
		if (atomic_read(&xprt->bc_slot_count) >= BC_MAX_SLOTS)
			goto not_found;
		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		atomic_inc(&xprt->bc_slot_count);
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
	dprintk("RPC: backchannel req=%p\n", req);
not_found:
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this callback request.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->ops->bc_free_rqst(req);
}

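/*
 * Generic implementation of the ->bc_free_rqst transport method: mark
 * the request unused and requeue it on the preallocation list, or free
 * it outright if the backchannel has since been torn down.
 */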
void xprt_free_bc_rqst(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		xprt_bc_reinit_xdr_buf(&req->rq_snd_buf);
		xprt_bc_reinit_xdr_buf(&req->rq_rcv_buf);
		req->rq_rcv_buf.len = PAGE_SIZE;
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		atomic_inc(&xprt->bc_slot_count);
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add back to the list because there is no need to
		 * have any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
	}
	xprt_put(xprt);
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_lookup_bc_request to obtain
 * one of these preallocated requests, and xprt_free_bc_request to
 * return it.
 *
 * We know that we're called in soft interrupt context, so take the
 * ordinary spin_lock; there is no need for the bottom-half variant.
 *
 * Return an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req, *new = NULL;

	do {
		spin_lock(&xprt->bc_pa_lock);
		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
			if (req->rq_connect_cookie != xprt->connect_cookie)
				continue;
			if (req->rq_xid == xid)
				goto found;
		}
		req = xprt_get_bc_request(xprt, xid, new);
found:
		spin_unlock(&xprt->bc_pa_lock);
		if (new) {
			if (req != new)
				xprt_free_allocation(new);
			break;
		} else if (req)
			break;
		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
	} while (new);
	return req;
}

/*
 * Add a callback request to the callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests.  Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt->bc_alloc_count--;
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC: add callback request to list\n");
	xprt_get(xprt);
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}