/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct sg_page_iter sg_iter;
	struct page *page;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
				DMA_BIDIRECTIONAL);

	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
		page = sg_page_iter_page(&sg_iter);
		unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
	}

	sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that supports multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem requires page sizes that the driver does not
 * support. Drivers that always support PAGE_SIZE or smaller will never
 * see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	struct scatterlist *sg;
	unsigned long va, pgoff;
	dma_addr_t mask;
	int i;

	/* rdma_for_each_block() has a bug if the page size is smaller than the
	 * page size used to build the umem. For now prevent smaller page sizes
	 * from being returned.
	 */
	pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);

	/* At minimum, drivers must support PAGE_SIZE or smaller */
	if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
		return 0;

	umem->iova = va = virt;
	/* The best result is the smallest page size that results in the minimum
	 * number of required pages. Compute the largest page size that could
	 * work based on VA address bits that don't change.
	 */
	mask = pgsz_bitmap &
	       GENMASK(BITS_PER_LONG - 1,
		       bits_per((umem->length - 1 + virt) ^ virt));
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
		mask |= (sg_dma_address(sg) + pgoff) ^ va;
		va += sg_dma_len(sg) - pgoff;
		/* Except for the last entry, the ending iova alignment sets
		 * the maximum possible page size as the low bits of the iova
		 * must be zero when starting the next chunk.
		 */
		if (i != (umem->nmap - 1))
			mask |= va;
		pgoff = 0;
	}

	/* The mask accumulates 1's in each position where the VA and physical
	 * address differ, thus the length of trailing 0 is the largest page
	 * size that can pass the VA through to the physical.
	 */
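	/*
	 * Illustrative example (not in the original source): if the lowest
	 * set bit in the accumulated mask is bit 21, then
	 * count_trailing_zeros(mask) == 21 and GENMASK(21, 0) keeps every
	 * candidate page size up to 2MB (1UL << 21).
	 */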
	if (mask)
		pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
	return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
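
/*
 * Usage sketch (illustrative, not part of this file): a driver that has
 * pinned a umem would typically pick its MR page size like this. The
 * supported-size bitmap and the error handling here are assumptions.
 *
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, iova);
 *	if (!pgsz)
 *		return ERR_PTR(-EINVAL);
 *	mr->page_shift = order_base_2(pgsz);
 */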

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	unsigned long dma_attr = 0;
	struct mm_struct *mm;
	unsigned long npages;
	int ret;
	struct scatterlist *sg = NULL;
	unsigned int gup_flags = FOLL_WRITE;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev = device;
	umem->length = size;
	umem->address = addr;
	/*
	 * Drivers should call ib_umem_find_best_pgsz() to set the iova
	 * correctly.
	 */
	umem->iova = addr;
	umem->writable = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

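	/*
	 * Charge the pages against RLIMIT_MEMLOCK up front and back the
	 * charge out again if the pin would push the mm over its limit.
	 */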
	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (!umem->writable)
		gup_flags |= FOLL_FORCE;

	while (npages) {
		cond_resched();
		ret = pin_user_pages_fast(cur_base,
					  min_t(unsigned long, npages,
						PAGE_SIZE /
						sizeof(struct page *)),
					  gup_flags | FOLL_LONGTERM, page_list);
		if (ret < 0)
			goto umem_release;

		cur_base += ret * PAGE_SIZE;
		npages -= ret;
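		/*
		 * Append this batch to the scatter table; passing the
		 * previous tail (sg) lets the table grow incrementally and
		 * merge contiguous pages into segments no larger than the
		 * device's DMA segment limit.
		 */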
		sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
				0, ret << PAGE_SHIFT,
				ib_dma_max_seg_size(device), sg, npages,
				GFP_KERNEL);
		umem->sg_nents = umem->sg_head.nents;
		if (IS_ERR(sg)) {
			unpin_user_pages_dirty_lock(page_list, ret, 0);
			ret = PTR_ERR(sg);
			goto umem_release;
		}
	}

	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	umem->nmap =
		ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
				    DMA_BIDIRECTIONAL, dma_attr);

	if (!umem->nmap) {
		ret = -ENOMEM;
		goto umem_release;
	}

	ret = 0;
	goto out;

umem_release:
	__ib_umem_release(device, umem, 0);
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
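
/*
 * Usage sketch (illustrative, not part of this file): a typical
 * reg_user_mr() path pins the region and releases the umem if anything
 * later fails. The pd/start/length/access_flags names are assumptions.
 *
 *	struct ib_umem *umem;
 *
 *	umem = ib_umem_get(pd->device, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return ERR_CAST(umem);
 *	...
 *	ib_umem_release(umem);
 */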

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * dst - destination buffer
 * umem - the umem to copy from
 * offset - offset to start copying from
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
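
/*
 * Usage sketch (illustrative, not part of this file): copying the first
 * bytes of a pinned region into a kernel buffer, e.g. to validate a
 * user-supplied header. The buffer name and size are assumptions.
 *
 *	u8 hdr[64];
 *
 *	ret = ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
 *	if (ret)
 *		return ret;
 */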