xref: /OK3568_Linux_fs/kernel/drivers/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2 /*
3  *
4  * (C) COPYRIGHT 2012-2022 ARM Limited. All rights reserved.
5  *
6  * This program is free software and is provided to you under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation, and any use by you of this program is subject to the terms
9  * of such GNU license.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  */
21 
22 #include <uapi/base/arm/dma_buf_test_exporter/dma-buf-test-exporter.h>
23 #include <linux/dma-buf.h>
24 #include <linux/miscdevice.h>
25 #include <linux/slab.h>
26 #include <linux/uaccess.h>
27 #include <linux/version.h>
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/atomic.h>
31 #include <linux/mm.h>
32 #include <linux/highmem.h>
33 #include <linux/dma-mapping.h>
34 
35 #define DMA_BUF_TE_VER_MAJOR 1
36 #define DMA_BUF_TE_VER_MINOR 0
37 
38 /* Maximum size (in pages) allowed in a single DMA_BUF_TE_ALLOC call */
39 #define DMA_BUF_TE_ALLOC_MAX_SIZE ((8ull << 30) >> PAGE_SHIFT) /* 8 GB expressed in pages */
40 
41 /* Since kernel version 5.0 CONFIG_ARCH_NO_SG_CHAIN replaced CONFIG_ARCH_HAS_SG_CHAIN */
42 #if KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE
43 #if (!defined(ARCH_HAS_SG_CHAIN) && !defined(CONFIG_ARCH_HAS_SG_CHAIN))
44 #define NO_SG_CHAIN
45 #endif
46 #elif defined(CONFIG_ARCH_NO_SG_CHAIN)
47 #define NO_SG_CHAIN
48 #endif
49 
50 struct dma_buf_te_alloc {
51 	/* the real alloc */
52 	size_t nr_pages;
53 	struct page **pages;
54 
55 	/* the debug usage tracking */
56 	int nr_attached_devices;
57 	int nr_device_mappings;
58 	int nr_cpu_mappings;
59 
60 	/* failure simulation */
61 	int fail_attach;
62 	int fail_map;
63 	int fail_mmap;
64 
65 	bool contiguous;
66 	dma_addr_t contig_dma_addr;
67 	void *contig_cpu_addr;
68 };
69 
70 struct dma_buf_te_attachment {
71 	struct sg_table *sg;
72 	bool attachment_mapped;
73 };
74 
75 static struct miscdevice te_device;
76 
77 #if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
78 static int dma_buf_te_attach(struct dma_buf *buf, struct device *dev, struct dma_buf_attachment *attachment)
79 #else
80 static int dma_buf_te_attach(struct dma_buf *buf, struct dma_buf_attachment *attachment)
81 #endif
82 {
83 	struct dma_buf_te_alloc	*alloc;
84 
85 	alloc = buf->priv;
86 
87 	if (alloc->fail_attach)
88 		return -EFAULT;
89 
90 	attachment->priv = kzalloc(sizeof(struct dma_buf_te_attachment), GFP_KERNEL);
91 	if (!attachment->priv)
92 		return -ENOMEM;
93 
94 	/* dma_buf is externally locked during call */
95 	alloc->nr_attached_devices++;
96 	return 0;
97 }
98 
99 /**
100  * dma_buf_te_detach - The detach callback function to release @attachment
101  *
102  * @buf: buffer for the @attachment
103  * @attachment: attachment data to be released
104  */
105 static void dma_buf_te_detach(struct dma_buf *buf, struct dma_buf_attachment *attachment)
106 {
107 	struct dma_buf_te_alloc *alloc = buf->priv;
108 	struct dma_buf_te_attachment *pa = attachment->priv;
109 
110 	/* dma_buf is externally locked during call */
111 
112 	WARN(pa->attachment_mapped, "WARNING: dma-buf-test-exporter detected detach with open device mappings");
113 
114 	alloc->nr_attached_devices--;
115 
116 	kfree(pa);
117 }
118 
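/**
 * dma_buf_te_map - The map_dma_buf callback
 *
 * @attachment: attachment to map for
 * @direction: DMA direction of the mapping
 *
 * Builds an sg_table describing the allocation (a single entry for contiguous
 * allocations, one entry per page otherwise), DMA-maps it for the attached
 * device and records the mapping for later sync/unmap. Refuses to map an
 * already mapped attachment and honours the fail_map simulation flag.
 */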
119 static struct sg_table *dma_buf_te_map(struct dma_buf_attachment *attachment, enum dma_data_direction direction)
120 {
121 	struct sg_table *sg;
122 	struct scatterlist *iter;
123 	struct dma_buf_te_alloc	*alloc;
124 	struct dma_buf_te_attachment *pa = attachment->priv;
125 	size_t i;
126 	int ret;
127 
128 	alloc = attachment->dmabuf->priv;
129 
130 	if (alloc->fail_map)
131 		return ERR_PTR(-ENOMEM);
132 
133 	if (WARN(pa->attachment_mapped,
134 	    "WARNING: Attempted to map already mapped attachment."))
135 		return ERR_PTR(-EBUSY);
136 
137 #ifdef NO_SG_CHAIN
138 	/* if the ARCH can't chain we can't have allocs larger than a single sg can hold */
139 	if (alloc->nr_pages > SG_MAX_SINGLE_ALLOC)
140 		return ERR_PTR(-EINVAL);
141 #endif /* NO_SG_CHAIN */
142 
143 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
144 	if (!sg)
145 		return ERR_PTR(-ENOMEM);
146 
147 	/* from here we access the allocation object, so lock the dmabuf pointing to it */
148 	mutex_lock(&attachment->dmabuf->lock);
149 
150 	if (alloc->contiguous)
151 		ret = sg_alloc_table(sg, 1, GFP_KERNEL);
152 	else
153 		ret = sg_alloc_table(sg, alloc->nr_pages, GFP_KERNEL);
154 	if (ret) {
155 		mutex_unlock(&attachment->dmabuf->lock);
156 		kfree(sg);
157 		return ERR_PTR(ret);
158 	}
159 
160 	if (alloc->contiguous) {
161 		sg_dma_len(sg->sgl) = alloc->nr_pages * PAGE_SIZE;
162 		sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(alloc->contig_dma_addr)), alloc->nr_pages * PAGE_SIZE, 0);
163 		sg_dma_address(sg->sgl) = alloc->contig_dma_addr;
164 	} else {
165 		for_each_sg(sg->sgl, iter, alloc->nr_pages, i)
166 			sg_set_page(iter, alloc->pages[i], PAGE_SIZE, 0);
167 	}
168 
169 	if (!dma_map_sg(attachment->dev, sg->sgl, sg->nents, direction)) {
170 		mutex_unlock(&attachment->dmabuf->lock);
171 		sg_free_table(sg);
172 		kfree(sg);
173 		return ERR_PTR(-ENOMEM);
174 	}
175 
176 	alloc->nr_device_mappings++;
177 	pa->attachment_mapped = true;
178 	pa->sg = sg;
179 	mutex_unlock(&attachment->dmabuf->lock);
180 	return sg;
181 }
182 
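/**
 * dma_buf_te_unmap - The unmap_dma_buf callback
 *
 * @attachment: attachment being unmapped
 * @sg: sg_table previously returned by dma_buf_te_map
 * @direction: DMA direction used for the mapping
 *
 * Clears the per-attachment mapping state, then DMA-unmaps and frees the
 * scatter list.
 */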
183 static void dma_buf_te_unmap(struct dma_buf_attachment *attachment,
184 							 struct sg_table *sg, enum dma_data_direction direction)
185 {
186 	struct dma_buf_te_alloc *alloc;
187 	struct dma_buf_te_attachment *pa = attachment->priv;
188 
189 	alloc = attachment->dmabuf->priv;
190 
191 	mutex_lock(&attachment->dmabuf->lock);
192 
193 	WARN(!pa->attachment_mapped, "WARNING: Unmatched unmap of attachment.");
194 
195 	alloc->nr_device_mappings--;
196 	pa->attachment_mapped = false;
197 	pa->sg = NULL;
198 	mutex_unlock(&attachment->dmabuf->lock);
199 
200 	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, direction);
201 	sg_free_table(sg);
202 	kfree(sg);
203 }
204 
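/**
 * dma_buf_te_release - The release callback, called when the last reference
 *                      to the dma_buf is dropped
 *
 * @buf: buffer being released
 *
 * Frees the backing pages (or the contiguous DMA allocation) together with
 * the page pointer array and the allocation descriptor itself.
 */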
205 static void dma_buf_te_release(struct dma_buf *buf)
206 {
207 	size_t i;
208 	struct dma_buf_te_alloc *alloc;
209 
210 	alloc = buf->priv;
211 	/* no need for locking */
212 
213 	if (alloc->contiguous) {
214 		dma_free_attrs(te_device.this_device,
215 						alloc->nr_pages * PAGE_SIZE,
216 						alloc->contig_cpu_addr,
217 						alloc->contig_dma_addr,
218 						DMA_ATTR_WRITE_COMBINE);
219 	} else {
220 		for (i = 0; i < alloc->nr_pages; i++)
221 			__free_page(alloc->pages[i]);
222 	}
223 #if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE)
224 	kvfree(alloc->pages);
225 #else
226 	kfree(alloc->pages);
227 #endif
228 	kfree(alloc);
229 }
230 
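/**
 * dma_buf_te_sync - Sync every currently mapped attachment
 *
 * @dmabuf: buffer whose attachments are synced
 * @direction: DMA direction to sync for
 * @start_cpu_access: true to sync for CPU access, false to hand back to the device
 *
 * Walks the attachment list under the dma_buf lock and calls
 * dma_sync_sg_for_cpu() or dma_sync_sg_for_device() on each active mapping,
 * skipping attachments that have no mapping.
 */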
231 static int dma_buf_te_sync(struct dma_buf *dmabuf,
232 			enum dma_data_direction direction,
233 			bool start_cpu_access)
234 {
235 	struct dma_buf_attachment *attachment;
236 
237 	mutex_lock(&dmabuf->lock);
238 
239 	list_for_each_entry(attachment, &dmabuf->attachments, node) {
240 		struct dma_buf_te_attachment *pa = attachment->priv;
241 		struct sg_table *sg = pa->sg;
242 
243 		if (!sg) {
244 			dev_dbg(te_device.this_device, "no mapping for device %s\n", dev_name(attachment->dev));
245 			continue;
246 		}
247 
248 		if (start_cpu_access) {
249 			dev_dbg(te_device.this_device, "sync cpu with device %s\n", dev_name(attachment->dev));
250 
251 			dma_sync_sg_for_cpu(attachment->dev, sg->sgl, sg->nents, direction);
252 		} else {
253 			dev_dbg(te_device.this_device, "sync device %s with cpu\n", dev_name(attachment->dev));
254 
255 			dma_sync_sg_for_device(attachment->dev, sg->sgl, sg->nents, direction);
256 		}
257 	}
258 
259 	mutex_unlock(&dmabuf->lock);
260 	return 0;
261 }
262 
263 static int dma_buf_te_begin_cpu_access(struct dma_buf *dmabuf,
264 					enum dma_data_direction direction)
265 {
266 	return dma_buf_te_sync(dmabuf, direction, true);
267 }
268 
269 static int dma_buf_te_end_cpu_access(struct dma_buf *dmabuf,
270 				enum dma_data_direction direction)
271 {
272 	return dma_buf_te_sync(dmabuf, direction, false);
273 }
274 
275 static void dma_buf_te_mmap_open(struct vm_area_struct *vma)
276 {
277 	struct dma_buf *dma_buf;
278 	struct dma_buf_te_alloc *alloc;
279 
280 	dma_buf = vma->vm_private_data;
281 	alloc = dma_buf->priv;
282 
283 	mutex_lock(&dma_buf->lock);
284 	alloc->nr_cpu_mappings++;
285 	mutex_unlock(&dma_buf->lock);
286 }
287 
288 static void dma_buf_te_mmap_close(struct vm_area_struct *vma)
289 {
290 	struct dma_buf *dma_buf;
291 	struct dma_buf_te_alloc *alloc;
292 
293 	dma_buf = vma->vm_private_data;
294 	alloc = dma_buf->priv;
295 
296 	BUG_ON(alloc->nr_cpu_mappings <= 0);
297 	mutex_lock(&dma_buf->lock);
298 	alloc->nr_cpu_mappings--;
299 	mutex_unlock(&dma_buf->lock);
300 }
301 
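/**
 * dma_buf_te_mmap_fault - Fault handler for CPU mappings
 *
 * @vmf: fault information (kernels before 4.11 also pass the &struct vm_area_struct)
 *
 * Looks up the backing page for the faulting offset, takes a reference on it
 * and hands it back to the fault path, or signals SIGBUS for offsets beyond
 * the allocation.
 */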
302 #if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
303 static int dma_buf_te_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
304 #elif KERNEL_VERSION(5, 1, 0) > LINUX_VERSION_CODE
305 static int dma_buf_te_mmap_fault(struct vm_fault *vmf)
306 #else
307 static vm_fault_t dma_buf_te_mmap_fault(struct vm_fault *vmf)
308 #endif
309 {
310 	struct dma_buf_te_alloc *alloc;
311 	struct dma_buf *dmabuf;
312 	struct page *pageptr;
313 
314 #if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
315 	dmabuf = vma->vm_private_data;
316 #else
317 	dmabuf = vmf->vma->vm_private_data;
318 #endif
319 	alloc = dmabuf->priv;
320 
321 	if (vmf->pgoff >= alloc->nr_pages)
322 		return VM_FAULT_SIGBUS;
323 
324 	pageptr = alloc->pages[vmf->pgoff];
325 
326 	BUG_ON(!pageptr);
327 
328 	get_page(pageptr);
329 	vmf->page = pageptr;
330 
331 	return 0;
332 }
333 
334 static const struct vm_operations_struct dma_buf_te_vm_ops = {
335 	.open = dma_buf_te_mmap_open,
336 	.close = dma_buf_te_mmap_close,
337 	.fault = dma_buf_te_mmap_fault
338 };
339 
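/**
 * dma_buf_te_mmap - The mmap callback for CPU mappings
 *
 * @dmabuf: buffer to map
 * @vma: VMA describing the user mapping
 *
 * Installs dma_buf_te_vm_ops so pages are faulted in on demand, and honours
 * the fail_mmap simulation flag.
 */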
340 static int dma_buf_te_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
341 {
342 	struct dma_buf_te_alloc *alloc;
343 
344 	alloc = dmabuf->priv;
345 
346 	if (alloc->fail_mmap)
347 		return -ENOMEM;
348 
349 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
350 	vma->vm_ops = &dma_buf_te_vm_ops;
351 	vma->vm_private_data = dmabuf;
352 
353 	/*  we fault in the pages on access */
354 
355 	/* call open to do the ref-counting */
356 	dma_buf_te_vm_ops.open(vma);
357 
358 	return 0;
359 }
360 
361 #if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE
362 static void *dma_buf_te_kmap_atomic(struct dma_buf *buf, unsigned long page_num)
363 {
364 	/* IGNORE */
365 	return NULL;
366 }
367 #endif
368 
369 static void *dma_buf_te_kmap(struct dma_buf *buf, unsigned long page_num)
370 {
371 	struct dma_buf_te_alloc *alloc;
372 
373 	alloc = buf->priv;
374 	if (page_num >= alloc->nr_pages)
375 		return NULL;
376 
377 	return kmap(alloc->pages[page_num]);
378 }
379 static void dma_buf_te_kunmap(struct dma_buf *buf,
380 		unsigned long page_num, void *addr)
381 {
382 	struct dma_buf_te_alloc *alloc;
383 
384 	alloc = buf->priv;
385 	if (page_num >= alloc->nr_pages)
386 		return;
387 
388 	kunmap(alloc->pages[page_num]);
389 }
390 
391 static struct dma_buf_ops dma_buf_te_ops = {
392 	/* real handlers */
393 	.attach = dma_buf_te_attach,
394 	.detach = dma_buf_te_detach,
395 	.map_dma_buf = dma_buf_te_map,
396 	.unmap_dma_buf = dma_buf_te_unmap,
397 	.release = dma_buf_te_release,
398 	.mmap = dma_buf_te_mmap,
399 	.begin_cpu_access = dma_buf_te_begin_cpu_access,
400 	.end_cpu_access = dma_buf_te_end_cpu_access,
401 #if KERNEL_VERSION(4, 12, 0) > LINUX_VERSION_CODE
402 	.kmap = dma_buf_te_kmap,
403 	.kunmap = dma_buf_te_kunmap,
404 
405 	/* nop handlers for mandatory functions we ignore */
406 	.kmap_atomic = dma_buf_te_kmap_atomic
407 #else
408 #if KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE
409 	.map = dma_buf_te_kmap,
410 	.unmap = dma_buf_te_kunmap,
411 #endif
412 
413 #if KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE
414 	/* nop handlers for mandatory functions we ignore */
415 	.map_atomic = dma_buf_te_kmap_atomic
416 #endif
417 #endif
418 };
419 
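/**
 * do_dma_buf_te_ioctl_version - Handle DMA_BUF_TE_VERSION
 *
 * @buf: userspace request/reply structure
 *
 * Verifies the enquiry opcode and reports the exporter's major/minor version
 * back to userspace.
 */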
420 static int do_dma_buf_te_ioctl_version(struct dma_buf_te_ioctl_version __user *buf)
421 {
422 	struct dma_buf_te_ioctl_version v;
423 
424 	if (copy_from_user(&v, buf, sizeof(v)))
425 		return -EFAULT;
426 
427 	if (v.op != DMA_BUF_TE_ENQ)
428 		return -EFAULT;
429 
430 	v.op = DMA_BUF_TE_ACK;
431 	v.major = DMA_BUF_TE_VER_MAJOR;
432 	v.minor = DMA_BUF_TE_VER_MINOR;
433 
434 	if (copy_to_user(buf, &v, sizeof(v)))
435 		return -EFAULT;
436 	else
437 		return 0;
438 }
439 
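/**
 * do_dma_buf_te_ioctl_alloc - Handle DMA_BUF_TE_ALLOC and DMA_BUF_TE_ALLOC_CONT
 *
 * @buf: userspace request holding the requested size in pages
 * @contiguous: true to back the buffer with one DMA-contiguous allocation
 *
 * Allocates the backing pages, exports them as a dma_buf using
 * dma_buf_te_ops and returns a new file descriptor for the buffer, or
 * -EFAULT on any failure.
 */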
440 static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf, bool contiguous)
441 {
442 	struct dma_buf_te_ioctl_alloc alloc_req;
443 	struct dma_buf_te_alloc *alloc;
444 	struct dma_buf *dma_buf;
445 	size_t i = 0;
446 	size_t max_nr_pages = DMA_BUF_TE_ALLOC_MAX_SIZE;
447 	int fd;
448 
449 	if (copy_from_user(&alloc_req, buf, sizeof(alloc_req))) {
450 		dev_err(te_device.this_device, "%s: couldn't get user data", __func__);
451 		goto no_input;
452 	}
453 
454 	if (!alloc_req.size) {
455 		dev_err(te_device.this_device, "%s: no size specified", __func__);
456 		goto invalid_size;
457 	}
458 
459 #ifdef NO_SG_CHAIN
460 	/* Whilst it is possible to allocate a larger buffer, we won't be able
461 	 * to map it during actual usage (mmap() still succeeds). We fail here
462 	 * so userspace code can deal with it early rather than hitting a
463 	 * driver failure later on.
464 	 */
465 	if (max_nr_pages > SG_MAX_SINGLE_ALLOC)
466 		max_nr_pages = SG_MAX_SINGLE_ALLOC;
467 #endif /* NO_SG_CHAIN */
468 
469 	if (alloc_req.size > max_nr_pages) {
470 		dev_err(te_device.this_device, "%s: buffer size of %llu pages exceeded the mapping limit of %zu pages",
471 				__func__, alloc_req.size, max_nr_pages);
472 		goto invalid_size;
473 	}
474 
475 	alloc = kzalloc(sizeof(struct dma_buf_te_alloc), GFP_KERNEL);
476 	if (alloc == NULL) {
477 		dev_err(te_device.this_device, "%s: couldn't alloc object", __func__);
478 		goto no_alloc_object;
479 	}
480 
481 	alloc->nr_pages = alloc_req.size;
482 	alloc->contiguous = contiguous;
483 
484 #if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE)
485 	alloc->pages = kvzalloc(sizeof(struct page *) * alloc->nr_pages, GFP_KERNEL);
486 #else
487 	alloc->pages = kzalloc(sizeof(struct page *) * alloc->nr_pages, GFP_KERNEL);
488 #endif
489 
490 	if (!alloc->pages) {
491 		dev_err(te_device.this_device,
492 				"%s: couldn't alloc %zu page structures",
493 				__func__, alloc->nr_pages);
494 		goto free_alloc_object;
495 	}
496 
497 	if (contiguous) {
498 		dma_addr_t dma_aux;
499 
500 		alloc->contig_cpu_addr = dma_alloc_attrs(te_device.this_device,
501 				alloc->nr_pages * PAGE_SIZE,
502 				&alloc->contig_dma_addr,
503 				GFP_KERNEL | __GFP_ZERO,
504 				DMA_ATTR_WRITE_COMBINE);
505 		if (!alloc->contig_cpu_addr) {
506 			dev_err(te_device.this_device, "%s: couldn't alloc contiguous buffer %zu pages",
507 				__func__, alloc->nr_pages);
508 			goto free_page_struct;
509 		}
510 		dma_aux = alloc->contig_dma_addr;
511 		for (i = 0; i < alloc->nr_pages; i++) {
512 			alloc->pages[i] = pfn_to_page(PFN_DOWN(dma_aux));
513 			dma_aux += PAGE_SIZE;
514 		}
515 	} else {
516 		for (i = 0; i < alloc->nr_pages; i++) {
517 			alloc->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
518 			if (alloc->pages[i] == NULL) {
519 				dev_err(te_device.this_device, "%s: couldn't alloc page", __func__);
520 				goto no_page;
521 			}
522 		}
523 	}
524 
525 	/* alloc ready, let's export it */
526 	{
527 		struct dma_buf_export_info export_info = {
528 			.exp_name = "dma_buf_te",
529 			.owner = THIS_MODULE,
530 			.ops = &dma_buf_te_ops,
531 			.size = alloc->nr_pages << PAGE_SHIFT,
532 			.flags = O_CLOEXEC | O_RDWR,
533 			.priv = alloc,
534 		};
535 
536 		dma_buf = dma_buf_export(&export_info);
537 	}
538 
539 	if (IS_ERR_OR_NULL(dma_buf)) {
540 		dev_err(te_device.this_device, "%s: couldn't export dma_buf", __func__);
541 		goto no_export;
542 	}
543 
544 	/* get fd for buf */
545 	fd = dma_buf_fd(dma_buf, O_CLOEXEC);
546 
547 	if (fd < 0) {
548 		dev_err(te_device.this_device, "%s: couldn't get fd from dma_buf", __func__);
549 		goto no_fd;
550 	}
551 
552 	return fd;
553 
554 no_fd:
555 	dma_buf_put(dma_buf);
556 no_export:
557 	/* i still valid */
558 no_page:
559 	if (contiguous) {
560 		dma_free_attrs(te_device.this_device,
561 						alloc->nr_pages * PAGE_SIZE,
562 						alloc->contig_cpu_addr,
563 						alloc->contig_dma_addr,
564 						DMA_ATTR_WRITE_COMBINE);
565 	} else {
566 		while (i-- > 0)
567 			__free_page(alloc->pages[i]);
568 	}
569 free_page_struct:
570 #if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE)
571 	kvfree(alloc->pages);
572 #else
573 	kfree(alloc->pages);
574 #endif
575 free_alloc_object:
576 	kfree(alloc);
577 no_alloc_object:
578 invalid_size:
579 no_input:
580 	return -EFAULT;
581 }
582 
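/**
 * do_dma_buf_te_ioctl_status - Handle DMA_BUF_TE_QUERY
 *
 * @arg: userspace structure naming the dma_buf fd and receiving the counters
 *
 * Copies a snapshot of the attachment, device-mapping and CPU-mapping counts
 * of one of our buffers back to userspace.
 */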
583 static int do_dma_buf_te_ioctl_status(struct dma_buf_te_ioctl_status __user *arg)
584 {
585 	struct dma_buf_te_ioctl_status status;
586 	struct dma_buf *dmabuf;
587 	struct dma_buf_te_alloc *alloc;
588 	int res = -EINVAL;
589 
590 	if (copy_from_user(&status, arg, sizeof(status)))
591 		return -EFAULT;
592 
593 	dmabuf = dma_buf_get(status.fd);
594 	if (IS_ERR_OR_NULL(dmabuf))
595 		return -EINVAL;
596 
597 	/* verify it's one of ours */
598 	if (dmabuf->ops != &dma_buf_te_ops)
599 		goto err_have_dmabuf;
600 
601 	/* ours, get the current status */
602 	alloc = dmabuf->priv;
603 
604 	/* lock while reading status to take a snapshot */
605 	mutex_lock(&dmabuf->lock);
606 	status.attached_devices = alloc->nr_attached_devices;
607 	status.device_mappings = alloc->nr_device_mappings;
608 	status.cpu_mappings = alloc->nr_cpu_mappings;
609 	mutex_unlock(&dmabuf->lock);
610 
611 	if (copy_to_user(arg, &status, sizeof(status)))
612 		goto err_have_dmabuf;
613 
614 	/* All OK */
615 	res = 0;
616 
617 err_have_dmabuf:
618 	dma_buf_put(dmabuf);
619 	return res;
620 }
621 
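/**
 * do_dma_buf_te_ioctl_set_failing - Handle DMA_BUF_TE_SET_FAILING
 *
 * @arg: userspace structure naming the dma_buf fd and the failure modes
 *
 * Atomically updates the fail_attach/fail_map/fail_mmap simulation flags of
 * one of our buffers.
 */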
622 static int do_dma_buf_te_ioctl_set_failing(struct dma_buf_te_ioctl_set_failing __user *arg)
623 {
624 	struct dma_buf *dmabuf;
625 	struct dma_buf_te_ioctl_set_failing f;
626 	struct dma_buf_te_alloc *alloc;
627 	int res = -EINVAL;
628 
629 	if (copy_from_user(&f, arg, sizeof(f)))
630 		return -EFAULT;
631 
632 	dmabuf = dma_buf_get(f.fd);
633 	if (IS_ERR_OR_NULL(dmabuf))
634 		return -EINVAL;
635 
636 	/* verify it's one of ours */
637 	if (dmabuf->ops != &dma_buf_te_ops)
638 		goto err_have_dmabuf;
639 
640 	/* ours, set the fail modes */
641 	alloc = dmabuf->priv;
642 	/* lock to set the fail modes atomically */
643 	mutex_lock(&dmabuf->lock);
644 	alloc->fail_attach = f.fail_attach;
645 	alloc->fail_map    = f.fail_map;
646 	alloc->fail_mmap   = f.fail_mmap;
647 	mutex_unlock(&dmabuf->lock);
648 
649 	/* success */
650 	res = 0;
651 
652 err_have_dmabuf:
653 	dma_buf_put(dmabuf);
654 	return res;
655 }
656 
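/**
 * dma_te_buf_fill - Fill a buffer with a byte value through the CPU
 *
 * @dma_buf: buffer to fill
 * @value: byte value written to each page
 *
 * Attaches and maps the buffer with our own device, brackets the writes with
 * begin/end CPU access and memsets each kmapped page.
 */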
657 static u32 dma_te_buf_fill(struct dma_buf *dma_buf, unsigned int value)
658 {
659 	struct dma_buf_attachment *attachment;
660 	struct sg_table *sgt;
661 	struct scatterlist *sg;
662 	unsigned int count;
663 	int ret = 0;
664 	size_t i;
665 
666 	attachment = dma_buf_attach(dma_buf, te_device.this_device);
667 	if (IS_ERR_OR_NULL(attachment))
668 		return -EBUSY;
669 
670 	sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
671 	if (IS_ERR_OR_NULL(sgt)) {
672 		ret = PTR_ERR(sgt);
673 		goto no_import;
674 	}
675 
676 	ret = dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
677 	if (ret)
678 		goto no_cpu_access;
679 
680 	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
681 		for (i = 0; i < sg_dma_len(sg); i = i + PAGE_SIZE) {
682 			void *addr = NULL;
683 #if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
684 			addr = dma_buf_te_kmap(dma_buf, i >> PAGE_SHIFT);
685 #else
686 			addr = dma_buf_kmap(dma_buf, i >> PAGE_SHIFT);
687 #endif
688 			if (!addr) {
689 				ret = -EPERM;
690 				goto no_kmap;
691 			}
692 			memset(addr, value, PAGE_SIZE);
693 #if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
694 			dma_buf_te_kunmap(dma_buf, i >> PAGE_SHIFT, addr);
695 #else
696 			dma_buf_kunmap(dma_buf, i >> PAGE_SHIFT, addr);
697 #endif
698 		}
699 	}
700 
701 no_kmap:
702 	dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
703 no_cpu_access:
704 	dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
705 no_import:
706 	dma_buf_detach(dma_buf, attachment);
707 	return ret;
708 }
709 
710 static int do_dma_buf_te_ioctl_fill(struct dma_buf_te_ioctl_fill __user *arg)
711 {
712 
713 	struct dma_buf *dmabuf;
714 	struct dma_buf_te_ioctl_fill f;
715 	int ret;
716 
717 	if (copy_from_user(&f, arg, sizeof(f)))
718 		return -EFAULT;
719 
720 	dmabuf = dma_buf_get(f.fd);
721 	if (IS_ERR_OR_NULL(dmabuf))
722 		return -EINVAL;
723 
724 	ret = dma_te_buf_fill(dmabuf, f.value);
725 	dma_buf_put(dmabuf);
726 
727 	return ret;
728 }
729 
730 static long dma_buf_te_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
731 {
732 	switch (cmd) {
733 	case DMA_BUF_TE_VERSION:
734 		return do_dma_buf_te_ioctl_version((struct dma_buf_te_ioctl_version __user *)arg);
735 	case DMA_BUF_TE_ALLOC:
736 		return do_dma_buf_te_ioctl_alloc((struct dma_buf_te_ioctl_alloc __user *)arg, false);
737 	case DMA_BUF_TE_ALLOC_CONT:
738 		return do_dma_buf_te_ioctl_alloc((struct dma_buf_te_ioctl_alloc __user *)arg, true);
739 	case DMA_BUF_TE_QUERY:
740 		return do_dma_buf_te_ioctl_status((struct dma_buf_te_ioctl_status __user *)arg);
741 	case DMA_BUF_TE_SET_FAILING:
742 		return do_dma_buf_te_ioctl_set_failing((struct dma_buf_te_ioctl_set_failing __user *)arg);
743 	case DMA_BUF_TE_FILL:
744 		return do_dma_buf_te_ioctl_fill((struct dma_buf_te_ioctl_fill __user *)arg);
745 	default:
746 		return -ENOTTY;
747 	}
748 }
749 
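/*
 * Illustrative userspace usage (a sketch only, not part of the driver; it
 * assumes the misc device shows up as /dev/dma_buf_te and uses the request
 * structures and ioctl numbers from the UAPI header included above):
 *
 *   int te = open("/dev/dma_buf_te", O_RDWR);
 *   struct dma_buf_te_ioctl_alloc req = { .size = 16 };          // 16 pages
 *   int fd = ioctl(te, DMA_BUF_TE_ALLOC, &req);                  // new dma-buf fd
 *   struct dma_buf_te_ioctl_fill fill = { .fd = fd, .value = 0xAB };
 *   ioctl(te, DMA_BUF_TE_FILL, &fill);                           // fill via CPU access
 */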
750 static const struct file_operations dma_buf_te_fops = {
751 	.owner = THIS_MODULE,
752 	.unlocked_ioctl = dma_buf_te_ioctl,
753 	.compat_ioctl = dma_buf_te_ioctl,
754 };
755 
756 static int __init dma_buf_te_init(void)
757 {
758 	int res;
759 
760 	te_device.minor = MISC_DYNAMIC_MINOR;
761 	te_device.name = "dma_buf_te";
762 	te_device.fops = &dma_buf_te_fops;
763 
764 	res = misc_register(&te_device);
765 	if (res) {
766 		pr_warn("Misc device registration failed for 'dma_buf_te'\n");
767 		return res;
768 	}
769 	te_device.this_device->coherent_dma_mask = DMA_BIT_MASK(32);
770 
771 	dev_info(te_device.this_device, "dma_buf_te ready\n");
772 	return 0;
773 
774 }
775 
776 static void __exit dma_buf_te_exit(void)
777 {
778 	misc_deregister(&te_device);
779 }
780 
781 module_init(dma_buf_te_init);
782 module_exit(dma_buf_te_exit);
783 MODULE_LICENSE("GPL");
784 MODULE_INFO(import_ns, "DMA_BUF");
785