xref: /OK3568_Linux_fs/kernel/net/rds/rdma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (c) 2007, 2020 Oracle and/or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rds.h"

/*
 * XXX
 *  - build with sparse
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
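
/*
 * Worked example (illustrative only, assuming 4 KiB pages): a vec with
 * addr = 0x1ffc and bytes = 8 has its last byte at 0x2003, so it straddles
 * two pages.  rds_pages_in_vec() computes
 *
 *	((0x1ffc + 8 + 4095) >> 12) - (0x1ffc >> 12) = 3 - 1 = 2
 *
 * A vec whose addr + bytes wraps around, or whose bytes exceed UINT_MAX,
 * yields 0 and is rejected by the callers below.
 */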

static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		kref_get(&insert->r_kref);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of an MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, kref_read(&mr->r_kref));

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct kref *kref)
{
	struct rds_mr *mr = container_of(kref, struct rds_mr, r_kref);

	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		kref_put(&mr->r_kref, __rds_put_mr_final);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			struct page **pages, int write)
{
	unsigned int gup_flags = FOLL_LONGTERM;
	int ret;

	if (write)
		gup_flags |= FOLL_WRITE;

	ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
	if (ret >= 0 && ret < nr_pages) {
		unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}

	return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret,
			  struct rds_conn_path *cp)
{
	struct rds_mr *mr = NULL, *found;
	struct scatterlist *sg = NULL;
	unsigned int nr_pages;
	struct page **pages = NULL;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents = 0;
	int need_odp = 0;
	long i;
	int ret;

	if (ipv6_addr_any(&rs->rs_bound_addr) || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((args->vec.addr + args->vec.bytes) < args->vec.addr) ||
	    PAGE_ALIGN(args->vec.addr + args->vec.bytes) <
		    (args->vec.addr + args->vec.bytes)) {
		ret = -EINVAL;
		goto out;
	}

	if (!can_do_mlock()) {
		ret = -EPERM;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of mr irrespective of underlying transport.
	 * To account for unaligned mr regions, subtract one from nr_pages.
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	kref_init(&mr->r_kref);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret == -EOPNOTSUPP) {
		need_odp = 1;
	} else if (ret <= 0) {
		goto out;
	} else {
		nents = ret;
		sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
		if (!sg) {
			ret = -ENOMEM;
			goto out;
		}
		WARN_ON(!nents);
		sg_init_table(sg, nents);

		/* Stick all pages into the scatterlist */
		for (i = 0 ; i < nents; i++)
			sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

		rdsdebug("RDS: trans_private nents is %u\n", nents);
	}
	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(
		sg, nents, rs, &mr->r_key, cp ? cp->cp_conn : NULL,
		args->vec.addr, args->vec.bytes,
		need_odp ? ODP_ZEROBASED : ODP_NOT_NEEDED);

	if (IS_ERR(trans_private)) {
		/* In ODP case, we don't GUP pages, so don't need
		 * to release anything.
		 */
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	if (need_odp)
		cookie = rds_rdma_make_cookie(mr->r_key, 0);
	else
		cookie = rds_rdma_make_cookie(mr->r_key,
					      args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr &&
	    put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
		if (!need_odp) {
			unpin_user_pages(pages, nr_pages);
			kfree(sg);
		}
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		kref_get(&mr->r_kref);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		kref_put(&mr->r_kref, __rds_put_mr_final);
	return ret;
}
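
/*
 * Illustrative sketch (not part of the original file): the <R_Key, offset>
 * cookie built above is a plain 64-bit value packed by the helpers declared
 * in rdma.h.  Assuming those helpers keep the R_Key in the low 32 bits and
 * the byte offset in the high 32 bits, mapping a buffer at user address
 * 0x7f32a1b04c80 through an MR whose R_Key is 0x1234 would give
 *
 *	cookie = rds_rdma_make_cookie(0x1234, 0x7f32a1b04c80 & ~PAGE_MASK)
 *	       = ((u64)0xc80 << 32) | 0x1234
 *
 * and the two halves are later recovered with rds_rdma_cookie_key() and
 * rds_rdma_cookie_offset(), e.g. in rds_cmsg_rdma_args() below.
 */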

int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
}
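
/*
 * Illustrative userspace sketch (not compiled here): rds_get_mr() above
 * services the RDS_GET_MR socket option.  A minimal caller, assuming an
 * already bound SOL_RDS socket `fd`, a buffer `buf` of `buf_len` bytes and
 * the uapi definitions from <linux/rds.h>, might register the buffer and
 * later drop the key roughly like this:
 */
#if 0
	uint64_t cookie = 0;
	struct rds_get_mr_args mr_args = {
		.vec = { .addr = (uint64_t)(unsigned long)buf,
			 .bytes = buf_len },
		.cookie_addr = (uint64_t)(unsigned long)&cookie,
		.flags = RDS_RDMA_READWRITE,
	};
	struct rds_free_mr_args free_args;

	if (setsockopt(fd, SOL_RDS, RDS_GET_MR, &mr_args, sizeof(mr_args)))
		err(1, "RDS_GET_MR");

	/* ... hand `cookie` to the peer, e.g. via RDS_CMSG_RDMA_DEST ... */

	free_args.cookie = cookie;
	free_args.flags = RDS_RDMA_INVALIDATE;
	if (setsockopt(fd, SOL_RDS, RDS_FREE_MR, &free_args, sizeof(free_args)))
		err(1, "RDS_FREE_MR");
#endif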

int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	kref_put(&mr->r_kref, __rds_put_mr_final);
	return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	/* Get a reference so that the MR won't go away before calling
	 * sync_mr() below.
	 */
	kref_get(&mr->r_kref);

	/* If it is going to be freed, remove it from the tree now so
	 * that no other thread can find it and free it.
	 */
	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was an RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* Release the reference held above. */
	kref_put(&mr->r_kref, __rds_put_mr_final);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me)
		kref_put(&mr->r_kref, __rds_put_mr_final);
}

void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	if (ro->op_odp_mr) {
		kref_put(&ro->op_odp_mr->r_kref, __rds_put_mr_final);
	} else {
		for (i = 0; i < ro->op_nents; i++) {
			struct page *page = sg_page(&ro->op_sg[i]);

			/* Mark page dirty if it was possibly modified, which
			 * is the case for an RDMA_READ which copies from remote
			 * to local memory
			 */
			unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
		}
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
	ro->op_odp_mr = NULL;
}

void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for an RDMA_READ which copies from remote
	 * to local memory */
	unpin_user_pages_dirty_lock(&page, 1, true);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}

/*
 * Count the number of pages needed to describe an incoming iovec array.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}

int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov)
{
	struct rds_iovec *vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	if (args->nr_local > UIO_MAXIOV)
		return -EMSGSIZE;

	iov->iov = kcalloc(args->nr_local,
			   sizeof(struct rds_iovec),
			   GFP_KERNEL);
	if (!iov->iov)
		return -ENOMEM;

	vec = &iov->iov[0];

	if (copy_from_user(vec, local_vec, args->nr_local *
			   sizeof(struct rds_iovec)))
		return -EFAULT;
	iov->len = args->nr_local;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++, vec++) {

		nr_pages = rds_pages_in_vec(vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec *iovs;
	unsigned int i, j;
	int ret = 0;
	bool odp_supported = true;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (ipv6_addr_any(&rs->rs_bound_addr)) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	if (vec->len != args->nr_local) {
		ret = -EINVAL;
		goto out_ret;
	}
	/* odp-mr is not supported for multiple requests within one message */
	if (args->nr_local != 1)
		odp_supported = false;

	iovs = vec->iov;

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ret;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	op->op_odp_mr = NULL;

	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (IS_ERR(op->op_sg)) {
		ret = PTR_ERR(op->op_sg);
		goto out_pages;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out_pages;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be positive and nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if ((!odp_supported && ret <= 0) ||
		    (odp_supported && ret <= 0 && ret != -EOPNOTSUPP))
			goto out_pages;

		if (ret == -EOPNOTSUPP) {
			struct rds_mr *local_odp_mr;

			if (!rs->rs_transport->get_mr) {
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			local_odp_mr =
				kzalloc(sizeof(*local_odp_mr), GFP_KERNEL);
			if (!local_odp_mr) {
				ret = -ENOMEM;
				goto out_pages;
			}
			RB_CLEAR_NODE(&local_odp_mr->r_rb_node);
			kref_init(&local_odp_mr->r_kref);
			local_odp_mr->r_trans = rs->rs_transport;
			local_odp_mr->r_sock = rs;
			local_odp_mr->r_trans_private =
				rs->rs_transport->get_mr(
					NULL, 0, rs, &local_odp_mr->r_key, NULL,
					iov->addr, iov->bytes, ODP_VIRTUAL);
			if (IS_ERR(local_odp_mr->r_trans_private)) {
				ret = IS_ERR(local_odp_mr->r_trans_private);
				rdsdebug("get_mr ret %d %p\n", ret,
					 local_odp_mr->r_trans_private);
				kfree(local_odp_mr);
				ret = -EOPNOTSUPP;
				goto out_pages;
			}
			rdsdebug("Need odp; local_odp_mr %p trans_private %p\n",
				 local_odp_mr, local_odp_mr->r_trans_private);
			op->op_odp_mr = local_odp_mr;
			op->op_odp_addr = iov->addr;
		}

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
					offset);

			sg_dma_len(sg) = sg->length;
			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
			       sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out_pages;
	}
	op->op_bytes = nr_bytes;
	ret = 0;

out_pages:
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
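
/*
 * Illustrative userspace sketch (not compiled here): rds_cmsg_rdma_args()
 * consumes an RDS_CMSG_RDMA_ARGS control message attached to sendmsg().
 * A minimal sender, assuming a bound SOL_RDS socket `fd`, a destination
 * sockaddr `peer_sin`, a struct rds_iovec `local_iov` describing a local
 * buffer, and a `peer_cookie` previously received from the peer, might
 * build the request roughly like this:
 */
#if 0
	struct rds_rdma_args rdma = {
		.cookie = peer_cookie,		/* peer's <R_Key, offset> */
		.remote_vec = { .addr = 0,	/* offset into the peer MR */
				.bytes = local_iov.bytes },
		.local_vec_addr = (uint64_t)(unsigned long)&local_iov,
		.nr_local = 1,
		.flags = RDS_RDMA_READWRITE | RDS_RDMA_NOTIFY_ME,
		.user_token = my_token,
	};
	char cbuf[CMSG_SPACE(sizeof(rdma))];
	struct msghdr msg = {
		.msg_name = &peer_sin, .msg_namelen = sizeof(peer_sin),
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_RDMA_ARGS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(rdma));
	memcpy(CMSG_DATA(cmsg), &rdma, sizeof(rdma));

	if (sendmsg(fd, &msg, 0) < 0)	/* RDMA op rides along with the message */
		err(1, "sendmsg");
#endif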

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		kref_get(&mr->r_kref);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private,
				     DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}
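
/*
 * Illustrative userspace sketch (not compiled here): the cookie obtained
 * from RDS_GET_MR is advertised to the peer by attaching it to an outgoing
 * message as an RDS_CMSG_RDMA_DEST control message.  The sketch assumes
 * `cookie` is the u64 written back by RDS_GET_MR and `msg` is a prepared
 * msghdr with control space for one cmsg:
 */
#if 0
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_RDMA_DEST;
	cmsg->cmsg_len = CMSG_LEN(sizeof(cookie));
	memcpy(CMSG_DATA(cmsg), &cookie, sizeof(cookie));
	/* the cookie then travels to the peer in an RDS extension header */
#endif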

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
			      &rm->rdma.op_rdma_mr, rm->m_conn_path);
}

/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	 || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (IS_ERR(rm->atomic.op_sg)) {
		ret = PTR_ERR(rm->atomic.op_sg);
		goto err;
	}

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		unpin_user_page(page);
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}
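
/*
 * Illustrative userspace sketch (not compiled here): an atomic compare-and-
 * swap against the peer's registered memory is requested by attaching an
 * RDS_CMSG_ATOMIC_CSWP control message to sendmsg().  The sketch assumes
 * `peer_cookie` names an 8-byte-aligned u64 inside the peer's MR, `result`
 * is a local 8-byte-aligned u64 that receives the old value, and `msg` is a
 * prepared msghdr with control space for one cmsg:
 */
#if 0
	struct rds_atomic_args atomic = {
		.cookie = peer_cookie,
		.local_addr = (uint64_t)(unsigned long)&result,
		.remote_addr = 0,		/* offset into the peer MR */
		.cswp = { .compare = old_val, .swap = new_val },
		.flags = RDS_RDMA_NOTIFY_ME,
		.user_token = my_token,
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_ATOMIC_CSWP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(atomic));
	memcpy(CMSG_DATA(cmsg), &atomic, sizeof(atomic));
#endif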