// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Rockchip Electronics Co.Ltd
 * Author: Felix Zeng <felix.zeng@rock-chips.com>
 */

#include <linux/version.h>
#include <linux/rk-dma-heap.h>

#if KERNEL_VERSION(5, 10, 0) <= LINUX_VERSION_CODE
#include <linux/dma-map-ops.h>
#endif

#include "rknpu_drv.h"
#include "rknpu_ioctl.h"
#include "rknpu_mem.h"

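/**
 * rknpu_mem_create_ioctl() - Create an NPU memory object for userspace
 * @rknpu_dev: the rknpu device
 * @data: userspace pointer to a struct rknpu_mem_create
 * @file: the device file, whose private_data holds the session
 *
 * Either imports an existing dma-buf (args.handle > 0) or allocates a new
 * physically contiguous buffer from the rk-dma-heap, maps it for the NPU,
 * builds a kernel virtual mapping, and copies the fd, DMA address and
 * object handle back to userspace.
 *
 * Return: 0 on success, negative errno on failure.
 */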
int rknpu_mem_create_ioctl(struct rknpu_device *rknpu_dev, unsigned long data,
			   struct file *file)
{
	struct rknpu_mem_create args;
	int ret = -EINVAL;
	struct dma_buf_attachment *attachment;
	struct sg_table *table;
	struct scatterlist *sgl;
	dma_addr_t phys;
	struct dma_buf *dmabuf;
	struct page **pages;
	struct page *page;
	struct rknpu_mem_object *rknpu_obj = NULL;
	struct rknpu_session *session = NULL;
	int i, fd;
	unsigned int length, page_count;

	if (unlikely(copy_from_user(&args, (struct rknpu_mem_create *)data,
				    sizeof(struct rknpu_mem_create)))) {
		LOG_ERROR("%s: copy_from_user failed\n", __func__);
		ret = -EFAULT;
		return ret;
	}

	if (args.flags & RKNPU_MEM_NON_CONTIGUOUS) {
		LOG_ERROR("%s: non-contiguous (iommu) memory is not supported yet!\n",
			  __func__);
		ret = -EINVAL;
		return ret;
	}

	rknpu_obj = kzalloc(sizeof(*rknpu_obj), GFP_KERNEL);
	if (!rknpu_obj)
		return -ENOMEM;

	if (args.handle > 0) {
		fd = args.handle;

		dmabuf = dma_buf_get(fd);
		if (IS_ERR(dmabuf)) {
			ret = PTR_ERR(dmabuf);
			goto err_free_obj;
		}

		rknpu_obj->dmabuf = dmabuf;
		rknpu_obj->owner = 0;
	} else {
		/* Allocate a new contiguous buffer from the rk-dma-heap */
		dmabuf = rk_dma_heap_buffer_alloc(rknpu_dev->heap, args.size,
						  O_CLOEXEC | O_RDWR, 0x0,
						  dev_name(rknpu_dev->dev));
		if (IS_ERR(dmabuf)) {
			LOG_ERROR("dmabuf alloc failed, args.size = %llu\n",
				  args.size);
			ret = PTR_ERR(dmabuf);
			goto err_free_obj;
		}

		rknpu_obj->dmabuf = dmabuf;
		rknpu_obj->owner = 1;

		fd = dma_buf_fd(dmabuf, O_CLOEXEC | O_RDWR);
		if (fd < 0) {
			LOG_ERROR("dmabuf fd get failed\n");
			ret = -EFAULT;
			goto err_free_dma_buf;
		}
	}

	attachment = dma_buf_attach(dmabuf, rknpu_dev->dev);
	if (IS_ERR(attachment)) {
		LOG_ERROR("dma_buf_attach failed\n");
		ret = PTR_ERR(attachment);
		goto err_free_dma_buf;
	}

	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
	if (IS_ERR(table)) {
		LOG_ERROR("dma_buf_map_attachment failed\n");
		dma_buf_detach(dmabuf, attachment);
		ret = PTR_ERR(table);
		goto err_free_dma_buf;
	}

	/*
	 * The buffer comes from a contiguous heap, so the table is expected
	 * to contain a single segment; phys, page and length hold the values
	 * of the last (only) entry after the loop.
	 */
	for_each_sgtable_sg(table, sgl, i) {
		phys = sg_dma_address(sgl);
		page = sg_page(sgl);
		length = sg_dma_len(sgl);
		LOG_DEBUG("%s, %d, phys: %pad, length: %u\n", __func__,
			  __LINE__, &phys, length);
	}

	page_count = length >> PAGE_SHIFT;
	pages = kmalloc_array(page_count, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		LOG_ERROR("alloc pages failed\n");
		ret = -ENOMEM;
		goto err_detach_dma_buf;
	}

	/* The segment is physically contiguous, so its pages are adjacent */
	for (i = 0; i < page_count; i++)
		pages[i] = &page[i];

	rknpu_obj->kv_addr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
	if (!rknpu_obj->kv_addr) {
		LOG_ERROR("vmap pages addr failed\n");
		ret = -ENOMEM;
		goto err_free_pages;
	}

	rknpu_obj->size = PAGE_ALIGN(args.size);
	rknpu_obj->dma_addr = phys;
	rknpu_obj->sgt = table;

	args.size = rknpu_obj->size;
	args.obj_addr = (__u64)(uintptr_t)rknpu_obj;
	args.dma_addr = rknpu_obj->dma_addr;
	args.handle = fd;

	LOG_DEBUG(
		"args.handle: %d, args.size: %llu, rknpu_obj: %#llx, rknpu_obj->dma_addr: %#llx\n",
		args.handle, args.size, (__u64)(uintptr_t)rknpu_obj,
		(__u64)rknpu_obj->dma_addr);

	if (unlikely(copy_to_user((struct rknpu_mem_create *)data, &args,
				  sizeof(struct rknpu_mem_create)))) {
		LOG_ERROR("%s: copy_to_user failed\n", __func__);
		ret = -EFAULT;
		goto err_unmap_kv_addr;
	}

	spin_lock(&rknpu_dev->lock);

	session = file->private_data;
	if (!session) {
		spin_unlock(&rknpu_dev->lock);
		ret = -EFAULT;
		goto err_unmap_kv_addr;
	}
	list_add_tail(&rknpu_obj->head, &session->list);

	spin_unlock(&rknpu_dev->lock);

	/*
	 * The temporary page array and the attachment are only needed to
	 * build the kernel mapping; release them once the object has been
	 * registered on the session, so the error path above still cleans
	 * everything up exactly once.
	 */
	kfree(pages);
	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attachment);

	return 0;

err_unmap_kv_addr:
	vunmap(rknpu_obj->kv_addr);
	rknpu_obj->kv_addr = NULL;

err_free_pages:
	kfree(pages);

err_detach_dma_buf:
	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attachment);

err_free_dma_buf:
	if (rknpu_obj->owner)
		rk_dma_heap_buffer_free(dmabuf);
	else
		dma_buf_put(dmabuf);

err_free_obj:
	kfree(rknpu_obj);

	return ret;
}

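/**
 * rknpu_mem_destroy_ioctl() - Destroy an NPU memory object
 * @rknpu_dev: the rknpu device
 * @data: userspace pointer to a struct rknpu_mem_destroy
 * @file: the device file, whose private_data holds the session
 *
 * Looks up the object on the session's list, unlinks it, releases the
 * kernel mapping, and drops the dma-buf reference for imported
 * (non-owned) buffers.
 *
 * Return: 0 on success, negative errno on failure.
 */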
int rknpu_mem_destroy_ioctl(struct rknpu_device *rknpu_dev, unsigned long data,
			    struct file *file)
{
	struct rknpu_mem_object *rknpu_obj, *entry, *q;
	struct rknpu_session *session = NULL;
	struct rknpu_mem_destroy args;
	int ret = -EFAULT;

	if (unlikely(copy_from_user(&args, (struct rknpu_mem_destroy *)data,
				    sizeof(struct rknpu_mem_destroy)))) {
		LOG_ERROR("%s: copy_from_user failed\n", __func__);
		ret = -EFAULT;
		return ret;
	}

	if (!kern_addr_valid(args.obj_addr)) {
		LOG_ERROR("%s: invalid obj_addr: %#llx\n", __func__,
			  (__u64)(uintptr_t)args.obj_addr);
		ret = -EINVAL;
		return ret;
	}

	rknpu_obj = (struct rknpu_mem_object *)(uintptr_t)args.obj_addr;
	LOG_DEBUG(
		"free args.handle: %d, rknpu_obj: %#llx, rknpu_obj->dma_addr: %#llx\n",
		args.handle, (__u64)(uintptr_t)rknpu_obj,
		(__u64)rknpu_obj->dma_addr);

	/* Unlink the object from the session list under the device lock */
	spin_lock(&rknpu_dev->lock);
	session = file->private_data;
	if (!session) {
		spin_unlock(&rknpu_dev->lock);
		ret = -EFAULT;
		return ret;
	}
	list_for_each_entry_safe(entry, q, &session->list, head) {
		if (entry == rknpu_obj) {
			list_del(&entry->head);
			break;
		}
	}
	spin_unlock(&rknpu_dev->lock);

	/* Only free the object if it was actually found on this session */
	if (rknpu_obj == entry) {
		vunmap(rknpu_obj->kv_addr);
		rknpu_obj->kv_addr = NULL;

		if (!rknpu_obj->owner)
			dma_buf_put(rknpu_obj->dmabuf);

		kfree(rknpu_obj);
	}

	return 0;
}

/*
 * Sync a sub-range of an rknpu buffer for CPU or device access:
 *   begin cpu access => for_cpu = true
 *   end cpu access => for_cpu = false
 */
static void __maybe_unused rknpu_dma_buf_sync(
	struct rknpu_device *rknpu_dev, struct rknpu_mem_object *rknpu_obj,
	u32 offset, u32 length, enum dma_data_direction dir, bool for_cpu)
{
	struct device *dev = rknpu_dev->dev;
	struct sg_table *sgt = rknpu_obj->sgt;
	struct scatterlist *sg = sgt->sgl;
	dma_addr_t sg_dma_addr = sg_dma_address(sg);
	unsigned int len = 0;
	int i;

	for_each_sgtable_sg(sgt, sg, i) {
		unsigned int sg_offset, sg_left, size = 0;

		/* Skip segments that end before the requested offset */
		len += sg->length;
		if (len <= offset) {
			sg_dma_addr += sg->length;
			continue;
		}

		/* Sync only the overlap of this segment with the window */
		sg_left = len - offset;
		sg_offset = sg->length - sg_left;

		size = (length < sg_left) ? length : sg_left;

		if (for_cpu)
			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
						      sg_offset, size, dir);
		else
			dma_sync_single_range_for_device(dev, sg_dma_addr,
							 sg_offset, size, dir);

		offset += size;
		length -= size;
		sg_dma_addr += sg->length;

		if (length == 0)
			break;
	}
}

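/**
 * rknpu_mem_sync_ioctl() - Sync a buffer range between CPU and device
 * @rknpu_dev: the rknpu device
 * @data: userspace pointer to a struct rknpu_mem_sync
 *
 * RKNPU_MEM_SYNC_TO_DEVICE ends CPU access, flushing CPU writes toward
 * the device; RKNPU_MEM_SYNC_FROM_DEVICE begins CPU access, making
 * device writes visible to the CPU.
 *
 * Return: 0 on success, negative errno on failure.
 */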
int rknpu_mem_sync_ioctl(struct rknpu_device *rknpu_dev, unsigned long data)
{
	struct rknpu_mem_object *rknpu_obj = NULL;
	struct rknpu_mem_sync args;
	struct dma_buf *dmabuf;
	int ret = -EFAULT;

	if (unlikely(copy_from_user(&args, (struct rknpu_mem_sync *)data,
				    sizeof(struct rknpu_mem_sync)))) {
		LOG_ERROR("%s: copy_from_user failed\n", __func__);
		ret = -EFAULT;
		return ret;
	}

	if (!kern_addr_valid(args.obj_addr)) {
		LOG_ERROR("%s: invalid obj_addr: %#llx\n", __func__,
			  (__u64)(uintptr_t)args.obj_addr);
		ret = -EINVAL;
		return ret;
	}

	rknpu_obj = (struct rknpu_mem_object *)(uintptr_t)args.obj_addr;
	dmabuf = rknpu_obj->dmabuf;

#ifndef CONFIG_DMABUF_PARTIAL
	/* No partial-sync dma-buf ops: sync the range manually per segment */
	if (args.flags & RKNPU_MEM_SYNC_TO_DEVICE) {
		rknpu_dma_buf_sync(rknpu_dev, rknpu_obj, args.offset, args.size,
				   DMA_TO_DEVICE, false);
	}
	if (args.flags & RKNPU_MEM_SYNC_FROM_DEVICE) {
		rknpu_dma_buf_sync(rknpu_dev, rknpu_obj, args.offset, args.size,
				   DMA_FROM_DEVICE, true);
	}
#else
	/* Delegate the partial sync to the dma-buf exporter's ops */
	if (args.flags & RKNPU_MEM_SYNC_TO_DEVICE) {
		dmabuf->ops->end_cpu_access_partial(dmabuf, DMA_TO_DEVICE,
						    args.offset, args.size);
	}
	if (args.flags & RKNPU_MEM_SYNC_FROM_DEVICE) {
		dmabuf->ops->begin_cpu_access_partial(dmabuf, DMA_FROM_DEVICE,
						      args.offset, args.size);
	}
#endif

	return 0;
}