xref: /OK3568_Linux_fs/kernel/drivers/infiniband/sw/rxe/rxe_mr.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4*4882a593Smuzhiyun  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include "rxe.h"
8*4882a593Smuzhiyun #include "rxe_loc.h"
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun /*
11*4882a593Smuzhiyun  * lfsr (linear feedback shift register) with period 255
12*4882a593Smuzhiyun  */
rxe_get_key(void)13*4882a593Smuzhiyun static u8 rxe_get_key(void)
14*4882a593Smuzhiyun {
15*4882a593Smuzhiyun 	static u32 key = 1;
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun 	key = key << 1;
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun 	key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
20*4882a593Smuzhiyun 		^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun 	key &= 0xff;
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun 	return key;
25*4882a593Smuzhiyun }
26*4882a593Smuzhiyun 
/*
 * Check that [iova, iova + length) lies inside the registered
 * region. DMA-type regions cover the whole address space.
 */
int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
{
	if (mem->type == RXE_MEM_TYPE_DMA)
		return 0;

	if (mem->type != RXE_MEM_TYPE_MR && mem->type != RXE_MEM_TYPE_FMR)
		return -EFAULT;

	if (iova < mem->iova || length > mem->length ||
	    iova > mem->iova + mem->length - length)
		return -EFAULT;

	return 0;
}
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun #define IB_ACCESS_REMOTE	(IB_ACCESS_REMOTE_READ		\
47*4882a593Smuzhiyun 				| IB_ACCESS_REMOTE_WRITE	\
48*4882a593Smuzhiyun 				| IB_ACCESS_REMOTE_ATOMIC)
49*4882a593Smuzhiyun 
rxe_mem_init(int access,struct rxe_mem * mem)50*4882a593Smuzhiyun static void rxe_mem_init(int access, struct rxe_mem *mem)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun 	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
53*4882a593Smuzhiyun 	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	mem->ibmr.lkey		= lkey;
56*4882a593Smuzhiyun 	mem->ibmr.rkey		= rkey;
57*4882a593Smuzhiyun 	mem->state		= RXE_MEM_STATE_INVALID;
58*4882a593Smuzhiyun 	mem->type		= RXE_MEM_TYPE_NONE;
59*4882a593Smuzhiyun 	mem->map_shift		= ilog2(RXE_BUF_PER_MAP);
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun 
rxe_mem_cleanup(struct rxe_pool_entry * arg)62*4882a593Smuzhiyun void rxe_mem_cleanup(struct rxe_pool_entry *arg)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun 	struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
65*4882a593Smuzhiyun 	int i;
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 	ib_umem_release(mem->umem);
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	if (mem->map) {
70*4882a593Smuzhiyun 		for (i = 0; i < mem->num_map; i++)
71*4882a593Smuzhiyun 			kfree(mem->map[i]);
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 		kfree(mem->map);
74*4882a593Smuzhiyun 	}
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun 
rxe_mem_alloc(struct rxe_mem * mem,int num_buf)77*4882a593Smuzhiyun static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	int i;
80*4882a593Smuzhiyun 	int num_map;
81*4882a593Smuzhiyun 	struct rxe_map **map = mem->map;
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
86*4882a593Smuzhiyun 	if (!mem->map)
87*4882a593Smuzhiyun 		goto err1;
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun 	for (i = 0; i < num_map; i++) {
90*4882a593Smuzhiyun 		mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
91*4882a593Smuzhiyun 		if (!mem->map[i])
92*4882a593Smuzhiyun 			goto err2;
93*4882a593Smuzhiyun 	}
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	mem->map_shift	= ilog2(RXE_BUF_PER_MAP);
98*4882a593Smuzhiyun 	mem->map_mask	= RXE_BUF_PER_MAP - 1;
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	mem->num_buf = num_buf;
101*4882a593Smuzhiyun 	mem->num_map = num_map;
102*4882a593Smuzhiyun 	mem->max_buf = num_map * RXE_BUF_PER_MAP;
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun 	return 0;
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun err2:
107*4882a593Smuzhiyun 	for (i--; i >= 0; i--)
108*4882a593Smuzhiyun 		kfree(mem->map[i]);
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	kfree(mem->map);
111*4882a593Smuzhiyun err1:
112*4882a593Smuzhiyun 	return -ENOMEM;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
rxe_mem_init_dma(struct rxe_pd * pd,int access,struct rxe_mem * mem)115*4882a593Smuzhiyun void rxe_mem_init_dma(struct rxe_pd *pd,
116*4882a593Smuzhiyun 		      int access, struct rxe_mem *mem)
117*4882a593Smuzhiyun {
118*4882a593Smuzhiyun 	rxe_mem_init(access, mem);
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	mem->ibmr.pd		= &pd->ibpd;
121*4882a593Smuzhiyun 	mem->access		= access;
122*4882a593Smuzhiyun 	mem->state		= RXE_MEM_STATE_VALID;
123*4882a593Smuzhiyun 	mem->type		= RXE_MEM_TYPE_DMA;
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun 
/*
 * Register a user memory region: pin the pages with ib_umem_get(),
 * allocate the map table and record each page's kernel address.
 *
 * On any failure after the umem was pinned it is released here and
 * mem->umem is reset to NULL; otherwise rxe_mem_cleanup() would call
 * ib_umem_release() a second time on the already-released umem.
 *
 * Returns 0 on success or a negative errno.
 */
int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mem)
{
	struct rxe_map		**map;
	struct rxe_phys_buf	*buf = NULL;
	struct ib_umem		*umem;
	struct sg_page_iter	sg_iter;
	int			num_buf;
	void			*vaddr;
	int err;

	umem = ib_umem_get(pd->ibpd.device, start, length, access);
	if (IS_ERR(umem)) {
		pr_warn("err %d from rxe_umem_get\n",
			(int)PTR_ERR(umem));
		err = PTR_ERR(umem);
		goto err_out;
	}

	mem->umem = umem;
	num_buf = ib_umem_num_pages(umem);

	rxe_mem_init(access, mem);

	err = rxe_mem_alloc(mem, num_buf);
	if (err) {
		pr_warn("err %d from rxe_mem_alloc\n", err);
		goto err_release;
	}

	mem->page_shift	= PAGE_SHIFT;
	mem->page_mask	= PAGE_SIZE - 1;

	num_buf	= 0;
	map	= mem->map;
	if (length > 0) {
		buf = map[0]->buf;

		/* record the kernel virtual address of every pinned page */
		for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
			if (num_buf >= RXE_BUF_PER_MAP) {
				map++;
				buf = map[0]->buf;
				num_buf = 0;
			}

			vaddr = page_address(sg_page_iter_page(&sg_iter));
			if (!vaddr) {
				pr_warn("null vaddr\n");
				err = -ENOMEM;
				goto err_release;
			}

			buf->addr = (uintptr_t)vaddr;
			buf->size = PAGE_SIZE;
			num_buf++;
			buf++;
		}
	}

	mem->ibmr.pd	= &pd->ibpd;
	mem->umem	= umem;
	mem->access	= access;
	mem->length	= length;
	mem->iova	= iova;
	mem->va		= start;
	mem->offset	= ib_umem_offset(umem);
	mem->state	= RXE_MEM_STATE_VALID;
	mem->type	= RXE_MEM_TYPE_MR;

	return 0;

err_release:
	ib_umem_release(umem);
	mem->umem = NULL;	/* rxe_mem_cleanup() must not release it again */
err_out:
	return err;
}
204*4882a593Smuzhiyun 
rxe_mem_init_fast(struct rxe_pd * pd,int max_pages,struct rxe_mem * mem)205*4882a593Smuzhiyun int rxe_mem_init_fast(struct rxe_pd *pd,
206*4882a593Smuzhiyun 		      int max_pages, struct rxe_mem *mem)
207*4882a593Smuzhiyun {
208*4882a593Smuzhiyun 	int err;
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 	rxe_mem_init(0, mem);
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	/* In fastreg, we also set the rkey */
213*4882a593Smuzhiyun 	mem->ibmr.rkey = mem->ibmr.lkey;
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	err = rxe_mem_alloc(mem, max_pages);
216*4882a593Smuzhiyun 	if (err)
217*4882a593Smuzhiyun 		goto err1;
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	mem->ibmr.pd		= &pd->ibpd;
220*4882a593Smuzhiyun 	mem->max_buf		= max_pages;
221*4882a593Smuzhiyun 	mem->state		= RXE_MEM_STATE_FREE;
222*4882a593Smuzhiyun 	mem->type		= RXE_MEM_TYPE_MR;
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	return 0;
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun err1:
227*4882a593Smuzhiyun 	return err;
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun 
/*
 * Translate iova into a (map index, buffer index, byte offset)
 * triple within mem->map[][]. The caller must have range-checked
 * the iova first.
 */
static void lookup_iova(
	struct rxe_mem	*mem,
	u64			iova,
	int			*m_out,
	int			*n_out,
	size_t			*offset_out)
{
	size_t offset = iova - mem->iova + mem->offset;

	if (likely(mem->page_shift)) {
		/* uniform page size: pure shift/mask arithmetic */
		*offset_out = offset & mem->page_mask;
		offset >>= mem->page_shift;
		*n_out = offset & mem->map_mask;
		*m_out = offset >> mem->map_shift;
	} else {
		/* variable-sized buffers: walk the table linearly */
		int m = 0;
		int n = 0;
		u64 len = mem->map[m]->buf[n].size;

		while (offset >= len) {
			offset -= len;
			n++;
			if (n == RXE_BUF_PER_MAP) {
				n = 0;
				m++;
			}
			len = mem->map[m]->buf[n].size;
		}

		*m_out = m;
		*n_out = n;
		*offset_out = offset;
	}
}
269*4882a593Smuzhiyun 
/*
 * Return the kernel virtual address corresponding to iova, or NULL
 * if the mr is invalid, the range is out of bounds, or the span
 * crosses a buffer boundary.
 */
void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
{
	size_t offset;
	int m, n;

	if (mem->state != RXE_MEM_STATE_VALID) {
		pr_warn("mem not in valid state\n");
		return NULL;
	}

	/* DMA-type regions have no map: iova is already a kernel address */
	if (!mem->map)
		return (void *)(uintptr_t)iova;

	if (mem_check_range(mem, iova, length)) {
		pr_warn("range violation\n");
		return NULL;
	}

	lookup_iova(mem, iova, &m, &n, &offset);

	if (offset + length > mem->map[m]->buf[n].size) {
		pr_warn("crosses page boundary\n");
		return NULL;
	}

	return (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
}
306*4882a593Smuzhiyun 
/* copy data from a range (vaddr, vaddr+length-1) to or from
 * a mem object starting at iova. Compute incremental value of
 * crc32 if crcp is not zero. caller must hold a reference to mem
 */
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
		 enum copy_direction dir, u32 *crcp)
{
	int			err;
	int			bytes;
	u8			*va;
	struct rxe_map		**map;
	struct rxe_phys_buf	*buf;
	int			m;
	int			i;
	size_t			offset;
	/* running crc seeded from the caller's accumulator (if any) */
	u32			crc = crcp ? (*crcp) : 0;

	if (length == 0)
		return 0;

	if (mem->type == RXE_MEM_TYPE_DMA) {
		/* DMA mr: iova is already a kernel virtual address,
		 * so a single memcpy covers the whole range
		 */
		u8 *src, *dest;

		src  = (dir == to_mem_obj) ?
			addr : ((void *)(uintptr_t)iova);

		dest = (dir == to_mem_obj) ?
			((void *)(uintptr_t)iova) : addr;

		memcpy(dest, src, length);

		if (crcp)
			*crcp = rxe_crc32(to_rdev(mem->ibmr.device),
					*crcp, dest, length);

		return 0;
	}

	WARN_ON_ONCE(!mem->map);

	err = mem_check_range(mem, iova, length);
	if (err) {
		err = -EFAULT;
		goto err1;
	}

	/* locate the first buffer touched by iova */
	lookup_iova(mem, iova, &m, &i, &offset);

	map	= mem->map + m;
	buf	= map[0]->buf + i;

	/* copy buffer by buffer, folding each chunk into the crc */
	while (length > 0) {
		u8 *src, *dest;

		va	= (u8 *)(uintptr_t)buf->addr + offset;
		src  = (dir == to_mem_obj) ? addr : va;
		dest = (dir == to_mem_obj) ? va : addr;

		/* copy at most to the end of the current buffer */
		bytes	= buf->size - offset;

		if (bytes > length)
			bytes = length;

		memcpy(dest, src, bytes);

		if (crcp)
			crc = rxe_crc32(to_rdev(mem->ibmr.device),
					crc, dest, bytes);

		length	-= bytes;
		addr	+= bytes;

		/* subsequent buffers are consumed from their start */
		offset	= 0;
		buf++;
		i++;

		/* step to the next map chunk when this one is exhausted */
		if (i == RXE_BUF_PER_MAP) {
			i = 0;
			map++;
			buf = map[0]->buf;
		}
	}

	if (crcp)
		*crcp = crc;

	return 0;

err1:
	return err;
}
398*4882a593Smuzhiyun 
/* copy data in or out of a wqe, i.e. sg list
 * under the control of a dma descriptor
 */
int copy_data(
	struct rxe_pd		*pd,
	int			access,
	struct rxe_dma_info	*dma,
	void			*addr,
	int			length,
	enum copy_direction	dir,
	u32			*crcp)
{
	int			bytes;
	/* local cursor state; written back to dma only on success */
	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
	int			offset	= dma->sge_offset;
	int			resid	= dma->resid;
	struct rxe_mem		*mem	= NULL;
	u64			iova;
	int			err;

	if (length == 0)
		return 0;

	/* cannot copy more than remains in the dma descriptor */
	if (length > resid) {
		err = -EINVAL;
		goto err2;
	}

	/* take a reference on the mr backing the current sge,
	 * unless the cursor already sits past its end
	 */
	if (sge->length && (offset < sge->length)) {
		mem = lookup_mem(pd, access, sge->lkey, lookup_local);
		if (!mem) {
			err = -EINVAL;
			goto err1;
		}
	}

	while (length > 0) {
		bytes = length;

		if (offset >= sge->length) {
			/* current sge exhausted: drop its mr and advance */
			if (mem) {
				rxe_drop_ref(mem);
				mem = NULL;
			}
			sge++;
			dma->cur_sge++;
			offset = 0;

			if (dma->cur_sge >= dma->num_sge) {
				err = -ENOSPC;
				goto err2;
			}

			if (sge->length) {
				mem = lookup_mem(pd, access, sge->lkey,
						 lookup_local);
				if (!mem) {
					err = -EINVAL;
					goto err1;
				}
			} else {
				/* zero-length sge: skip it */
				continue;
			}
		}

		/* clamp the chunk to what remains in this sge */
		if (bytes > sge->length - offset)
			bytes = sge->length - offset;

		if (bytes > 0) {
			iova = sge->addr + offset;

			err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
			if (err)
				goto err2;

			offset	+= bytes;
			resid	-= bytes;
			length	-= bytes;
			addr	+= bytes;
		}
	}

	/* commit the advanced cursor back to the dma descriptor */
	dma->sge_offset = offset;
	dma->resid	= resid;

	if (mem)
		rxe_drop_ref(mem);

	return 0;

err2:
	if (mem)
		rxe_drop_ref(mem);
err1:
	return err;
}
495*4882a593Smuzhiyun 
advance_dma_data(struct rxe_dma_info * dma,unsigned int length)496*4882a593Smuzhiyun int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
497*4882a593Smuzhiyun {
498*4882a593Smuzhiyun 	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
499*4882a593Smuzhiyun 	int			offset	= dma->sge_offset;
500*4882a593Smuzhiyun 	int			resid	= dma->resid;
501*4882a593Smuzhiyun 
502*4882a593Smuzhiyun 	while (length) {
503*4882a593Smuzhiyun 		unsigned int bytes;
504*4882a593Smuzhiyun 
505*4882a593Smuzhiyun 		if (offset >= sge->length) {
506*4882a593Smuzhiyun 			sge++;
507*4882a593Smuzhiyun 			dma->cur_sge++;
508*4882a593Smuzhiyun 			offset = 0;
509*4882a593Smuzhiyun 			if (dma->cur_sge >= dma->num_sge)
510*4882a593Smuzhiyun 				return -ENOSPC;
511*4882a593Smuzhiyun 		}
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 		bytes = length;
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun 		if (bytes > sge->length - offset)
516*4882a593Smuzhiyun 			bytes = sge->length - offset;
517*4882a593Smuzhiyun 
518*4882a593Smuzhiyun 		offset	+= bytes;
519*4882a593Smuzhiyun 		resid	-= bytes;
520*4882a593Smuzhiyun 		length	-= bytes;
521*4882a593Smuzhiyun 	}
522*4882a593Smuzhiyun 
523*4882a593Smuzhiyun 	dma->sge_offset = offset;
524*4882a593Smuzhiyun 	dma->resid	= resid;
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun 	return 0;
527*4882a593Smuzhiyun }
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun /* (1) find the mem (mr or mw) corresponding to lkey/rkey
530*4882a593Smuzhiyun  *     depending on lookup_type
531*4882a593Smuzhiyun  * (2) verify that the (qp) pd matches the mem pd
532*4882a593Smuzhiyun  * (3) verify that the mem can support the requested access
533*4882a593Smuzhiyun  * (4) verify that mem state is valid
534*4882a593Smuzhiyun  */
lookup_mem(struct rxe_pd * pd,int access,u32 key,enum lookup_type type)535*4882a593Smuzhiyun struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
536*4882a593Smuzhiyun 			   enum lookup_type type)
537*4882a593Smuzhiyun {
538*4882a593Smuzhiyun 	struct rxe_mem *mem;
539*4882a593Smuzhiyun 	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
540*4882a593Smuzhiyun 	int index = key >> 8;
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	mem = rxe_pool_get_index(&rxe->mr_pool, index);
543*4882a593Smuzhiyun 	if (!mem)
544*4882a593Smuzhiyun 		return NULL;
545*4882a593Smuzhiyun 
546*4882a593Smuzhiyun 	if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
547*4882a593Smuzhiyun 		     (type == lookup_remote && mr_rkey(mem) != key) ||
548*4882a593Smuzhiyun 		     mr_pd(mem) != pd ||
549*4882a593Smuzhiyun 		     (access && !(access & mem->access)) ||
550*4882a593Smuzhiyun 		     mem->state != RXE_MEM_STATE_VALID)) {
551*4882a593Smuzhiyun 		rxe_drop_ref(mem);
552*4882a593Smuzhiyun 		mem = NULL;
553*4882a593Smuzhiyun 	}
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 	return mem;
556*4882a593Smuzhiyun }
557