xref: /OK3568_Linux_fs/kernel/drivers/fpga/dfl-afu-dma-region.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2017-2018 Intel Corporation, Inc.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Authors:
8*4882a593Smuzhiyun  *   Wu Hao <hao.wu@intel.com>
9*4882a593Smuzhiyun  *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/dma-mapping.h>
13*4882a593Smuzhiyun #include <linux/sched/signal.h>
14*4882a593Smuzhiyun #include <linux/uaccess.h>
15*4882a593Smuzhiyun #include <linux/mm.h>
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include "dfl-afu.h"
18*4882a593Smuzhiyun 
afu_dma_region_init(struct dfl_feature_platform_data * pdata)19*4882a593Smuzhiyun void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
20*4882a593Smuzhiyun {
21*4882a593Smuzhiyun 	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun 	afu->dma_regions = RB_ROOT;
24*4882a593Smuzhiyun }
25*4882a593Smuzhiyun 
/**
 * afu_dma_pin_pages - pin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be pinned
 *
 * Pin all the pages of given dfl_afu_dma_region.
 * Return 0 for success or negative error code.
 */
static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
			     struct dfl_afu_dma_region *region)
{
	/* caller guarantees length is non-zero and PAGE_ALIGNED */
	int npages = region->length >> PAGE_SHIFT;
	struct device *dev = &pdata->dev->dev;
	int ret, pinned;

	/* charge the pages against the caller's locked-vm limit up front */
	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;

	region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
	if (!region->pages) {
		ret = -ENOMEM;
		goto unlock_vm;
	}

	/* FOLL_WRITE: the device may DMA into these pages */
	pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
				     region->pages);
	if (pinned < 0) {
		/* nothing pinned: only the array and accounting to undo */
		ret = pinned;
		goto free_pages;
	} else if (pinned != npages) {
		/* partial pin: release the pages that did get pinned */
		ret = -EFAULT;
		goto unpin_pages;
	}

	dev_dbg(dev, "%d pages pinned\n", pinned);

	return 0;

	/* unwind in reverse order of acquisition */
unpin_pages:
	unpin_user_pages(region->pages, pinned);
free_pages:
	kfree(region->pages);
unlock_vm:
	account_locked_vm(current->mm, npages, false);
	return ret;
}
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun /**
75*4882a593Smuzhiyun  * afu_dma_unpin_pages - unpin pages of given dma memory region
76*4882a593Smuzhiyun  * @pdata: feature device platform data
77*4882a593Smuzhiyun  * @region: dma memory region to be unpinned
78*4882a593Smuzhiyun  *
79*4882a593Smuzhiyun  * Unpin all the pages of given dfl_afu_dma_region.
80*4882a593Smuzhiyun  * Return 0 for success or negative error code.
81*4882a593Smuzhiyun  */
afu_dma_unpin_pages(struct dfl_feature_platform_data * pdata,struct dfl_afu_dma_region * region)82*4882a593Smuzhiyun static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
83*4882a593Smuzhiyun 				struct dfl_afu_dma_region *region)
84*4882a593Smuzhiyun {
85*4882a593Smuzhiyun 	long npages = region->length >> PAGE_SHIFT;
86*4882a593Smuzhiyun 	struct device *dev = &pdata->dev->dev;
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	unpin_user_pages(region->pages, npages);
89*4882a593Smuzhiyun 	kfree(region->pages);
90*4882a593Smuzhiyun 	account_locked_vm(current->mm, npages, false);
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	dev_dbg(dev, "%ld pages unpinned\n", npages);
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun /**
96*4882a593Smuzhiyun  * afu_dma_check_continuous_pages - check if pages are continuous
97*4882a593Smuzhiyun  * @region: dma memory region
98*4882a593Smuzhiyun  *
99*4882a593Smuzhiyun  * Return true if pages of given dma memory region have continuous physical
100*4882a593Smuzhiyun  * address, otherwise return false.
101*4882a593Smuzhiyun  */
afu_dma_check_continuous_pages(struct dfl_afu_dma_region * region)102*4882a593Smuzhiyun static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	int npages = region->length >> PAGE_SHIFT;
105*4882a593Smuzhiyun 	int i;
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	for (i = 0; i < npages - 1; i++)
108*4882a593Smuzhiyun 		if (page_to_pfn(region->pages[i]) + 1 !=
109*4882a593Smuzhiyun 				page_to_pfn(region->pages[i + 1]))
110*4882a593Smuzhiyun 			return false;
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	return true;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun /**
116*4882a593Smuzhiyun  * dma_region_check_iova - check if memory area is fully contained in the region
117*4882a593Smuzhiyun  * @region: dma memory region
118*4882a593Smuzhiyun  * @iova: address of the dma memory area
119*4882a593Smuzhiyun  * @size: size of the dma memory area
120*4882a593Smuzhiyun  *
121*4882a593Smuzhiyun  * Compare the dma memory area defined by @iova and @size with given dma region.
122*4882a593Smuzhiyun  * Return true if memory area is fully contained in the region, otherwise false.
123*4882a593Smuzhiyun  */
dma_region_check_iova(struct dfl_afu_dma_region * region,u64 iova,u64 size)124*4882a593Smuzhiyun static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
125*4882a593Smuzhiyun 				  u64 iova, u64 size)
126*4882a593Smuzhiyun {
127*4882a593Smuzhiyun 	if (!size && region->iova != iova)
128*4882a593Smuzhiyun 		return false;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	return (region->iova <= iova) &&
131*4882a593Smuzhiyun 		(region->length + region->iova >= iova + size);
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun /**
135*4882a593Smuzhiyun  * afu_dma_region_add - add given dma region to rbtree
136*4882a593Smuzhiyun  * @pdata: feature device platform data
137*4882a593Smuzhiyun  * @region: dma region to be added
138*4882a593Smuzhiyun  *
139*4882a593Smuzhiyun  * Return 0 for success, -EEXIST if dma region has already been added.
140*4882a593Smuzhiyun  *
141*4882a593Smuzhiyun  * Needs to be called with pdata->lock heold.
142*4882a593Smuzhiyun  */
afu_dma_region_add(struct dfl_feature_platform_data * pdata,struct dfl_afu_dma_region * region)143*4882a593Smuzhiyun static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
144*4882a593Smuzhiyun 			      struct dfl_afu_dma_region *region)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun 	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
147*4882a593Smuzhiyun 	struct rb_node **new, *parent = NULL;
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
150*4882a593Smuzhiyun 		(unsigned long long)region->iova);
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	new = &afu->dma_regions.rb_node;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	while (*new) {
155*4882a593Smuzhiyun 		struct dfl_afu_dma_region *this;
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 		this = container_of(*new, struct dfl_afu_dma_region, node);
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun 		parent = *new;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 		if (dma_region_check_iova(this, region->iova, region->length))
162*4882a593Smuzhiyun 			return -EEXIST;
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 		if (region->iova < this->iova)
165*4882a593Smuzhiyun 			new = &((*new)->rb_left);
166*4882a593Smuzhiyun 		else if (region->iova > this->iova)
167*4882a593Smuzhiyun 			new = &((*new)->rb_right);
168*4882a593Smuzhiyun 		else
169*4882a593Smuzhiyun 			return -EEXIST;
170*4882a593Smuzhiyun 	}
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	rb_link_node(&region->node, parent, new);
173*4882a593Smuzhiyun 	rb_insert_color(&region->node, &afu->dma_regions);
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	return 0;
176*4882a593Smuzhiyun }
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun /**
179*4882a593Smuzhiyun  * afu_dma_region_remove - remove given dma region from rbtree
180*4882a593Smuzhiyun  * @pdata: feature device platform data
181*4882a593Smuzhiyun  * @region: dma region to be removed
182*4882a593Smuzhiyun  *
183*4882a593Smuzhiyun  * Needs to be called with pdata->lock heold.
184*4882a593Smuzhiyun  */
afu_dma_region_remove(struct dfl_feature_platform_data * pdata,struct dfl_afu_dma_region * region)185*4882a593Smuzhiyun static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
186*4882a593Smuzhiyun 				  struct dfl_afu_dma_region *region)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun 	struct dfl_afu *afu;
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
191*4882a593Smuzhiyun 		(unsigned long long)region->iova);
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	afu = dfl_fpga_pdata_get_private(pdata);
194*4882a593Smuzhiyun 	rb_erase(&region->node, &afu->dma_regions);
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun /**
198*4882a593Smuzhiyun  * afu_dma_region_destroy - destroy all regions in rbtree
199*4882a593Smuzhiyun  * @pdata: feature device platform data
200*4882a593Smuzhiyun  *
201*4882a593Smuzhiyun  * Needs to be called with pdata->lock heold.
202*4882a593Smuzhiyun  */
afu_dma_region_destroy(struct dfl_feature_platform_data * pdata)203*4882a593Smuzhiyun void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
204*4882a593Smuzhiyun {
205*4882a593Smuzhiyun 	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
206*4882a593Smuzhiyun 	struct rb_node *node = rb_first(&afu->dma_regions);
207*4882a593Smuzhiyun 	struct dfl_afu_dma_region *region;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	while (node) {
210*4882a593Smuzhiyun 		region = container_of(node, struct dfl_afu_dma_region, node);
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 		dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
213*4882a593Smuzhiyun 			(unsigned long long)region->iova);
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 		rb_erase(node, &afu->dma_regions);
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 		if (region->iova)
218*4882a593Smuzhiyun 			dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
219*4882a593Smuzhiyun 				       region->iova, region->length,
220*4882a593Smuzhiyun 				       DMA_BIDIRECTIONAL);
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 		if (region->pages)
223*4882a593Smuzhiyun 			afu_dma_unpin_pages(pdata, region);
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 		node = rb_next(node);
226*4882a593Smuzhiyun 		kfree(region);
227*4882a593Smuzhiyun 	}
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun /**
231*4882a593Smuzhiyun  * afu_dma_region_find - find the dma region from rbtree based on iova and size
232*4882a593Smuzhiyun  * @pdata: feature device platform data
233*4882a593Smuzhiyun  * @iova: address of the dma memory area
234*4882a593Smuzhiyun  * @size: size of the dma memory area
235*4882a593Smuzhiyun  *
236*4882a593Smuzhiyun  * It finds the dma region from the rbtree based on @iova and @size:
237*4882a593Smuzhiyun  * - if @size == 0, it finds the dma region which starts from @iova
238*4882a593Smuzhiyun  * - otherwise, it finds the dma region which fully contains
239*4882a593Smuzhiyun  *   [@iova, @iova+size)
240*4882a593Smuzhiyun  * If nothing is matched returns NULL.
241*4882a593Smuzhiyun  *
242*4882a593Smuzhiyun  * Needs to be called with pdata->lock held.
243*4882a593Smuzhiyun  */
244*4882a593Smuzhiyun struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data * pdata,u64 iova,u64 size)245*4882a593Smuzhiyun afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
246*4882a593Smuzhiyun {
247*4882a593Smuzhiyun 	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
248*4882a593Smuzhiyun 	struct rb_node *node = afu->dma_regions.rb_node;
249*4882a593Smuzhiyun 	struct device *dev = &pdata->dev->dev;
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun 	while (node) {
252*4882a593Smuzhiyun 		struct dfl_afu_dma_region *region;
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 		region = container_of(node, struct dfl_afu_dma_region, node);
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 		if (dma_region_check_iova(region, iova, size)) {
257*4882a593Smuzhiyun 			dev_dbg(dev, "find region (iova = %llx)\n",
258*4882a593Smuzhiyun 				(unsigned long long)region->iova);
259*4882a593Smuzhiyun 			return region;
260*4882a593Smuzhiyun 		}
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun 		if (iova < region->iova)
263*4882a593Smuzhiyun 			node = node->rb_left;
264*4882a593Smuzhiyun 		else if (iova > region->iova)
265*4882a593Smuzhiyun 			node = node->rb_right;
266*4882a593Smuzhiyun 		else
267*4882a593Smuzhiyun 			/* the iova region is not fully covered. */
268*4882a593Smuzhiyun 			break;
269*4882a593Smuzhiyun 	}
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
272*4882a593Smuzhiyun 		(unsigned long long)iova, (unsigned long long)size);
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	return NULL;
275*4882a593Smuzhiyun }
276*4882a593Smuzhiyun 
/**
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @pdata: feature device platform data
 * @iova: address of the dma region
 *
 * Convenience wrapper around afu_dma_region_find() with size 0, i.e. it
 * matches only a region whose start address equals @iova.
 *
 * Needs to be called with pdata->lock held.
 */
static struct dfl_afu_dma_region *
afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
{
	return afu_dma_region_find(pdata, iova, 0);
}
289*4882a593Smuzhiyun 
/**
 * afu_dma_map_region - map memory region for dma
 * @pdata: feature device platform data
 * @user_addr: address of the memory region
 * @length: size of the memory region
 * @iova: pointer of iova address
 *
 * Map memory region defined by @user_addr and @length, and return dma address
 * of the memory region via @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
		       u64 user_addr, u64 length, u64 *iova)
{
	struct dfl_afu_dma_region *region;
	int ret;

	/*
	 * Check Inputs, only accept page-aligned user memory region with
	 * valid length.
	 */
	if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
		return -EINVAL;

	/* Check overflow */
	if (user_addr + length < user_addr)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->user_addr = user_addr;
	region->length = length;

	/* Pin the user memory region */
	ret = afu_dma_pin_pages(pdata, region);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to pin memory region\n");
		goto free_region;
	}

	/* Only accept continuous pages, return error else */
	if (!afu_dma_check_continuous_pages(region)) {
		dev_err(&pdata->dev->dev, "pages are not continuous\n");
		ret = -EINVAL;
		goto unpin_pages;
	}

	/*
	 * As pages are continuous then start to do DMA mapping: one
	 * dma_map_page() call covers the whole contiguous range.
	 */
	region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
				    region->pages[0], 0,
				    region->length,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
		dev_err(&pdata->dev->dev, "failed to map for dma\n");
		ret = -EFAULT;
		goto unpin_pages;
	}

	/* written before the rbtree insert; callers must ignore it on error */
	*iova = region->iova;

	/* publish the region in the rbtree under pdata->lock */
	mutex_lock(&pdata->lock);
	ret = afu_dma_region_add(pdata, region);
	mutex_unlock(&pdata->lock);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to add dma region\n");
		goto unmap_dma;
	}

	return 0;

	/* unwind in reverse order of acquisition */
unmap_dma:
	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
	afu_dma_unpin_pages(pdata, region);
free_region:
	kfree(region);
	return ret;
}
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun /**
373*4882a593Smuzhiyun  * afu_dma_unmap_region - unmap dma memory region
374*4882a593Smuzhiyun  * @pdata: feature device platform data
375*4882a593Smuzhiyun  * @iova: dma address of the region
376*4882a593Smuzhiyun  *
377*4882a593Smuzhiyun  * Unmap dma memory region based on @iova.
378*4882a593Smuzhiyun  * Return 0 for success, otherwise error code.
379*4882a593Smuzhiyun  */
afu_dma_unmap_region(struct dfl_feature_platform_data * pdata,u64 iova)380*4882a593Smuzhiyun int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
381*4882a593Smuzhiyun {
382*4882a593Smuzhiyun 	struct dfl_afu_dma_region *region;
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	mutex_lock(&pdata->lock);
385*4882a593Smuzhiyun 	region = afu_dma_region_find_iova(pdata, iova);
386*4882a593Smuzhiyun 	if (!region) {
387*4882a593Smuzhiyun 		mutex_unlock(&pdata->lock);
388*4882a593Smuzhiyun 		return -EINVAL;
389*4882a593Smuzhiyun 	}
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	if (region->in_use) {
392*4882a593Smuzhiyun 		mutex_unlock(&pdata->lock);
393*4882a593Smuzhiyun 		return -EBUSY;
394*4882a593Smuzhiyun 	}
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 	afu_dma_region_remove(pdata, region);
397*4882a593Smuzhiyun 	mutex_unlock(&pdata->lock);
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
400*4882a593Smuzhiyun 		       region->iova, region->length, DMA_BIDIRECTIONAL);
401*4882a593Smuzhiyun 	afu_dma_unpin_pages(pdata, region);
402*4882a593Smuzhiyun 	kfree(region);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	return 0;
405*4882a593Smuzhiyun }
406