/* /OK3568_Linux_fs/kernel/drivers/infiniband/hw/usnic/usnic_uiom.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

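/*
 * Number of scatterlist entries that fit in one page alongside the
 * struct usnic_uiom_chunk header: the bytes left in the page after the
 * header, divided by the size of one page_list entry.
 */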
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

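/*
 * Fault handler installed on each PD's IOMMU domain.  A faulting DMA is
 * not recoverable here, so just log the offending IOVA and return
 * -ENOSYS so the IOMMU core reports the fault as unhandled.
 */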
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

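/*
 * Unpin every page referenced by the chunk list, optionally marking the
 * pages dirty, and free the chunk bookkeeping itself.
 */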
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			unpin_user_pages_dirty_lock(&page, 1, dirty);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

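/*
 * Pin the user pages backing [addr, addr + size) and record them as
 * scatterlist chunks on uiomr->chunk_list.  The pages are charged
 * against the caller's RLIMIT_MEMLOCK, and a reference is taken on the
 * owning mm on success.
 */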
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	if (!size)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	mmap_read_lock(mm);

	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = pin_user_pages(cur_base,
				     min_t(unsigned long, npages,
				     PAGE_SIZE / sizeof(struct page *)),
				     gup_flags | FOLL_LONGTERM,
				     page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(struct_size(chunk, page_list,
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		usnic_uiom_put_pages(chunk_list, 0);
		atomic64_sub(npages, &current->mm->pinned_vm);
	} else
		mmgrab(uiomr->owning_mm);

	mmap_read_unlock(mm);
	free_page((unsigned long) page_list);
	return ret;
}

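/*
 * Unmap all IOVAs covered by the given interval list from the PD's
 * IOMMU domain.  Unmapping proceeds one page at a time; per the inline
 * note below, this appears to work around RH bug 970401.
 */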
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

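/*
 * Tear down a registration: remove its intervals from the PD's interval
 * tree, unmap them from the IOMMU, and unpin the backing pages.  Pages
 * are dirtied only if requested and the mapping was writable.
 */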
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

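/*
 * Program the PD's IOMMU domain for each interval, walking the pinned
 * page chunks in step and coalescing physically contiguous pages into a
 * single iommu_map() call where possible.
 */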
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
							struct usnic_uiom_chunk,
							list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

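/*
 * Register a user memory region with the PD: pin the backing pages,
 * compute the page intervals not already mapped, program the IOMMU for
 * those intervals, and record the region in the PD's interval tree.
 *
 * A minimal usage sketch (assuming a PD from usnic_uiom_alloc_pd() with
 * a device already attached; error handling elided):
 *
 *	uiomr = usnic_uiom_reg_get(pd, addr, len, 1, 0);
 *	if (IS_ERR(uiomr))
 *		return PTR_ERR(uiomr);
 *	...
 *	usnic_uiom_reg_release(uiomr);
 */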
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * The Intel IOMMU map operation throws an error if a translation
	 * entry is changed from read-only to writable.  This module must
	 * not unmap and then remap the entry after fixing the permission,
	 * because that would open a small window where a hardware DMA may
	 * page fault.  Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

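/*
 * Release a registration obtained from usnic_uiom_reg_get(): unmap and
 * unpin its pages, undo the pinned-page accounting, and drop the
 * reference on the owning mm.
 */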
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
	__usnic_uiom_release_tail(uiomr);
}

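/*
 * Allocate a protection domain backed by a new IOMMU domain on the PCI
 * bus, with the usNIC fault handler installed.
 */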
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

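/*
 * Attach a device to the PD's IOMMU domain and add it to the PD's
 * device list.  The IOMMU must support cache coherency; otherwise the
 * device is detached again and -EINVAL is returned.
 */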
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

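/*
 * Detach a device from the PD's IOMMU domain and drop it from the PD's
 * device list, if it is actually attached.
 */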
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	kfree(uiom_dev);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	iommu_detach_device(pd->domain, dev);
}

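/*
 * Return a NULL-terminated array of the devices currently attached to
 * the PD.  The caller frees it with usnic_uiom_free_dev_list().
 */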
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

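/*
 * One-time module init check: usNIC requires an IOMMU on the PCI bus,
 * so fail early if none is present.
 */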
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	return 0;
}