xref: /OK3568_Linux_fs/kernel/drivers/dma-buf/dma-buf.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/sched/task.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

/*
 * Walk db_list and invoke the callback for each entry, so the caller
 * can extract the required info out of each dmabuf.
 */
int get_each_dmabuf(int (*callback)(const struct dma_buf *dmabuf,
		    void *private), void *private)
{
	struct dma_buf *buf;
	int ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	list_for_each_entry(buf, &db_list.head, list_node) {
		ret = callback(buf, private);
		if (ret)
			break;
	}
	mutex_unlock(&db_list.lock);
	return ret;
}
EXPORT_SYMBOL_GPL(get_each_dmabuf);
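
/*
 * Example use of get_each_dmabuf() (an illustrative sketch only; the
 * callback and the accumulator below are hypothetical, not part of this
 * file's API):
 *
 *	static int sum_dmabuf_size(const struct dma_buf *dmabuf, void *private)
 *	{
 *		*(size_t *)private += dmabuf->size;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	int err = get_each_dmabuf(sum_dmabuf_size, &total);
 */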

#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)
static size_t db_total_size;
static size_t db_peak_size;

void dma_buf_reset_peak_size(void)
{
	mutex_lock(&db_list.lock);
	db_peak_size = 0;
	mutex_unlock(&db_list.lock);
}
EXPORT_SYMBOL_GPL(dma_buf_reset_peak_size);

size_t dma_buf_get_peak_size(void)
{
	size_t sz;

	mutex_lock(&db_list.lock);
	sz = db_peak_size;
	mutex_unlock(&db_list.lock);

	return sz;
}
EXPORT_SYMBOL_GPL(dma_buf_get_peak_size);

size_t dma_buf_get_total_size(void)
{
	size_t sz;

	mutex_lock(&db_list.lock);
	sz = db_total_size;
	mutex_unlock(&db_list.lock);

	return sz;
}
EXPORT_SYMBOL_GPL(dma_buf_get_total_size);
#endif

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;
#ifdef CONFIG_DMABUF_CACHE
	int dtor_ret = 0;
#endif

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dma_buf_stats_teardown(dmabuf);
#ifdef CONFIG_DMABUF_CACHE
	if (dmabuf->dtor)
		dtor_ret = dmabuf->dtor(dmabuf, dmabuf->dtor_data);

	if (!dtor_ret)
#endif
		dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	mutex_lock(&db_list.lock);
#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)
	db_total_size -= dmabuf->size;
#endif
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/*
	 * Only support discovering the end of the buffer, but also allow
	 * SEEK_SET to maintain the idiomatic SEEK_END(0), SEEK_CUR(0)
	 * pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

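/*
 * Userspace can use the llseek support above to discover a buffer's size
 * without a dedicated ioctl; a minimal sketch (error handling omitted,
 * dmabuf_fd is assumed to be a valid dma-buf file descriptor):
 *
 *	off_t size = lseek(dmabuf_fd, 0, SEEK_END);
 *
 *	lseek(dmabuf_fd, 0, SEEK_SET);
 */
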
/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */

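/*
 * A minimal userspace sketch of the polling described above (assumes
 * <poll.h> and a valid dmabuf_fd; error handling omitted): wait until all
 * attached fences, shared and exclusive, have signalled before writing to
 * the buffer from the CPU.
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLOUT };
 *
 *	poll(&pfd, 1, -1);
 */
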
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 * The name can be changed if the same piece of memory is used for multiple
 * purposes by different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @name:   [in]     the new name of the dma-buf.
 *
 * Returns 0 on success, or -ENOMEM if the name cannot be duplicated.
 */
long dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
	long ret = 0;
	char *buf = kstrndup(name, DMA_BUF_NAME_LEN, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	ret = _dma_buf_set_name(dmabuf, buf);
	if (ret)
		kfree(buf);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_set_name);

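/*
 * A minimal kernel-side naming sketch (the name string is arbitrary and
 * purely illustrative):
 *
 *	err = dma_buf_set_name(dmabuf, "camera-preview");
 */
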
static long dma_buf_set_name_user(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	ret = _dma_buf_set_name(dmabuf, name);
	if (ret)
		kfree(name);

	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	struct dma_buf_sync_partial sync_p;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name_user(dmabuf, (const char __user *)arg);

	case DMA_BUF_IOCTL_SYNC_PARTIAL:
		if (copy_from_user(&sync_p, (void __user *) arg, sizeof(sync_p)))
			return -EFAULT;

		if (sync_p.len == 0)
			return 0;

		if (sync_p.len > dmabuf->size || sync_p.offset > dmabuf->size - sync_p.len)
			return -EINVAL;

		if (sync_p.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync_p.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync_p.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access_partial(dmabuf, direction,
							     sync_p.offset,
							     sync_p.len);
		else
			ret = dma_buf_begin_cpu_access_partial(dmabuf, direction,
							       sync_p.offset,
							       sync_p.len);

		return ret;

	default:
		return -ENOTTY;
	}
}

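/*
 * Userspace sketch for the vendor DMA_BUF_IOCTL_SYNC_PARTIAL extension
 * handled above. A hedged example only: the flag and field names are
 * taken from this handler, the offset/len values are arbitrary:
 *
 *	struct dma_buf_sync_partial sp = {
 *		.flags  = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
 *		.offset = 0,
 *		.len    = 4096,
 *	};
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC_PARTIAL, &sp);
 */
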
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}
EXPORT_SYMBOL_GPL(is_dma_buf_file);

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique and thus
	 * not suitable for use as the dentry name by dmabuf stats.
	 * Override ->i_ino with a unique, dmabuffs-specific value.
	 */
	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

static void dma_buf_set_default_name(struct dma_buf *dmabuf)
{
	char task_comm[TASK_COMM_LEN];
	char *name;

	get_task_comm(task_comm, current->group_leader);
	name = kasprintf(GFP_KERNEL, "%d-%s", current->tgid, task_comm);
	dma_buf_set_name(dmabuf, name);
	kfree(name);
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: first the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. A condensed code sketch of this sequence follows this
 * comment block.
 */

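/*
 * Condensed sketch of the sequence described in the DOC comment above.
 * The my_dmabuf_ops, my_buf and my_dev names are hypothetical; the
 * dma_buf_*() calls are the ones implemented in this file. Error
 * handling is omitted for brevity.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *	int fd;
 *
 *	exp_info.ops   = &my_dmabuf_ops;
 *	exp_info.size  = my_buf->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv  = my_buf;
 *	dmabuf = dma_buf_export(&exp_info);
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 *	// importing driver, after receiving the fd from userspace:
 *	dmabuf = dma_buf_get(fd);
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... program the device with the sg_table and do the DMA ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */
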
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. See &struct dma_buf_export_info
 *			for further details.
 *
 * On success, returns a newly created dma_buf object which wraps the
 * supplied private data and operations for dma_buf_ops. On missing ops or
 * an error allocating the struct dma_buf, returns a negative error wrapped
 * into a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
#ifdef CONFIG_DMABUF_CACHE
	mutex_init(&dmabuf->cache_lock);
#endif
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
#if IS_ENABLED(CONFIG_RK_DMABUF_DEBUG)
	db_total_size += dmabuf->size;
	db_peak_size = max(db_total_size, db_peak_size);
#endif
	mutex_unlock(&db_list.lock);

	ret = dma_buf_stats_setup(dmabuf);
	if (ret)
		goto err_sysfs;

	if (IS_ENABLED(CONFIG_RK_DMABUF_DEBUG))
		dma_buf_set_default_name(dmabuf);

	return dmabuf;

err_sysfs:
	/*
	 * Set file->f_path.dentry->d_fsdata to NULL so that when
	 * dma_buf_release() gets invoked by dentry_ops, it exits
	 * early before calling the release() dma_buf op.
	 */
	file->f_path.dentry->d_fsdata = NULL;
	fput(file);
err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase the refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Attachments must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to the newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dma_buf_pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_buf_unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_buf_unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_pin - Lock down the DMA-buf
 *
 * @attach:	[in]	attachment which should be pinned
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_pin);

/**
 * dma_buf_unpin - Remove lock from DMA-buf
 *
 * @attach:	[in]	attachment which should be unpinned
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unpin);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns an sg_table containing the scatterlist of the buffer mapped for
 * @attach; returns ERR_PTR on error. May return -EINTR if it is interrupted
 * by a signal.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = dma_buf_pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_move_notify);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
 *   that the dma-buf layer keeps a reference count for all vmap access and
 *   calls down into the exporter's vmap function only when no vmapping exists,
 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 *   provided by taking the dma_buf->lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following (see also the sketch after this
 *   comment block):
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *    For correctness and optimal performance, it is always required to use
 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
 *    mapped address. Userspace cannot rely on coherent access, even when there
 *    are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer is rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *		       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 *   equally achieve that for a dma-buf object.
 */

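/*
 * Userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC bracketing described
 * in the DOC comment above (error and -EAGAIN/-EINTR retry handling
 * omitted; only uapi/linux/dma-buf.h definitions are used):
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... read/write through p ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(p, size);
 */
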
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
					MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
				     enum dma_data_direction direction,
				     unsigned int offset, unsigned int len)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access_partial)
		ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
							    offset, len);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

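/*
 * Sketch of kernel-side CPU access bracketing with the two helpers above,
 * assuming the buffer contents were made CPU-visible elsewhere (e.g. via
 * dma_buf_vmap()) and that "vaddr", "out" and "len" belong to the caller:
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	memcpy(out, vaddr, len);
 *	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */
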
int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
				   enum dma_data_direction direction,
				   unsigned int offset, unsigned int len)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access_partial)
		ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
							  offset, len);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use is to map objects
 * linearly into kernel address space when they see frequent use. Please
 * attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

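/*
 * Sketch of paired vmap/vunmap usage by an importer (error handling
 * abbreviated):
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	// ... CPU access through vaddr ...
 *	dma_buf_vunmap(dmabuf, vaddr);
 */
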
int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
{
	int ret = 0;

	if (WARN_ON(!dmabuf) || !flags)
		return -EINVAL;

	if (dmabuf->ops->get_flags)
		ret = dmabuf->ops->get_flags(dmabuf, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_get_flags);

int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid)
{
	if (WARN_ON(!dmabuf) || !uuid)
		return -EINVAL;

	if (!dmabuf->ops->get_uuid)
		return -ENODEV;

	return dmabuf->ops->get_uuid(dmabuf, uuid);
}
EXPORT_SYMBOL_GPL(dma_buf_get_uuid);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "");
		spin_unlock(&buf_obj->name_lock);

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);