// SPDX-License-Identifier: GPL-2.0
/* kernel/drivers/dma-buf/udmabuf.c */
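/*
 * udmabuf: export the pages of a sealed memfd as a dma-buf.
 *
 * Userspace hands the driver one or more (memfd, offset, size) regions
 * via the UDMABUF_CREATE / UDMABUF_CREATE_LIST ioctls on /dev/udmabuf;
 * the driver pins the backing shmem pages and wraps them in a dma-buf
 * that devices can attach to, map, and that userspace can mmap().
 */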
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memfd.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>

static const u32    list_limit = 1024;  /* udmabuf_create_list->count limit */
static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes  */

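/* One exported buffer: the pinned shmem pages plus a cached device mapping. */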
struct udmabuf {
	pgoff_t pagecount;
	struct page **pages;
	struct sg_table *sg;
	struct miscdevice *device;
};

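/*
 * Fault handler for userspace mmap()s of the dma-buf: the backing pages
 * are already pinned, so just hand back the page for the faulting offset.
 */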
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udmabuf *ubuf = vma->vm_private_data;
	pgoff_t pgoff = vmf->pgoff;

	if (pgoff >= ubuf->pagecount)
		return VM_FAULT_SIGBUS;
	vmf->page = ubuf->pages[pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};

static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct udmabuf *ubuf = buf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &udmabuf_vm_ops;
	vma->vm_private_data = ubuf;
	return 0;
}

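/*
 * Build a scatterlist over the pinned pages and map it for @dev; used both
 * for device attachments and, against the misc device, for CPU access.
 */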
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
				     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct sg_table *sg;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
					0, ubuf->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret < 0)
		goto err;
	ret = dma_map_sgtable(dev, sg, direction, 0);
	if (ret < 0)
		goto err;
	return sg;

err:
	sg_free_table(sg);
	kfree(sg);
	return ERR_PTR(ret);
}

static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}

static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}

static void unmap_udmabuf(struct dma_buf_attachment *at,
			  struct sg_table *sg,
			  enum dma_data_direction direction)
{
	put_sg_table(at->dev, sg, direction);
}

static void release_udmabuf(struct dma_buf *buf)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	pgoff_t pg;

	if (ubuf->sg)
		put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);

	for (pg = 0; pg < ubuf->pagecount; pg++)
		put_page(ubuf->pages[pg]);
	kfree(ubuf->pages);
	kfree(ubuf);
}

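/*
 * CPU access: the mapping against the misc device is created lazily on the
 * first begin_cpu_access call and only synced on later begin/end calls.
 */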
static int begin_cpu_udmabuf(struct dma_buf *buf,
			     enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;
	int ret = 0;

	if (!ubuf->sg) {
		ubuf->sg = get_sg_table(dev, buf, direction);
		if (IS_ERR(ubuf->sg)) {
			ret = PTR_ERR(ubuf->sg);
			ubuf->sg = NULL;
		}
	} else {
		dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
				    direction);
	}

	return ret;
}

static int end_cpu_udmabuf(struct dma_buf *buf,
			   enum dma_data_direction direction)
{
	struct udmabuf *ubuf = buf->priv;
	struct device *dev = ubuf->device->this_device;

	if (!ubuf->sg)
		return -EINVAL;

	dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
	return 0;
}

static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf	   = map_udmabuf,
	.unmap_dma_buf	   = unmap_udmabuf,
	.release	   = release_udmabuf,
	.mmap		   = mmap_udmabuf,
	.begin_cpu_access  = begin_cpu_udmabuf,
	.end_cpu_access    = end_cpu_udmabuf,
};

/*
 * The memfd must be sealed against shrinking (F_SEAL_SHRINK set) and must
 * not carry a write seal (F_SEAL_WRITE clear), so the pinned pages stay
 * valid and writable for the lifetime of the dma-buf.
 */
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)

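/*
 * Core create path: validate each (memfd, offset, size) triple, pin the
 * backing shmem pages into ubuf->pages, then export them as a new dma-buf
 * fd. All offsets and sizes must be page-aligned and the total size may
 * not exceed size_limit_mb.
 */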
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/* First pass: check alignment and enforce the total size limit. */
	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	/* Second pass: check the seals on each memfd, then pin its pages. */
	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		if (!shmem_mapping(file_inode(memfd)->i_mapping))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size   >> PAGE_SHIFT;
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			page = shmem_read_mapping_page(
				file_inode(memfd)->i_mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
	}

	exp_info.ops  = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	return dma_buf_fd(buf, flags);

err:
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}

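/*
 * A minimal userspace sketch (not part of this driver; variable names are
 * illustrative) of driving UDMABUF_CREATE. The memfd has to be created
 * with sealing allowed and sealed with F_SEAL_SHRINK before the ioctl:
 *
 *	int devfd = open("/dev/udmabuf", O_RDWR);
 *	int memfd = memfd_create("buf", MFD_ALLOW_SEALING);
 *
 *	ftruncate(memfd, size);			// size is page-aligned
 *	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
 *
 *	struct udmabuf_create create = {
 *		.memfd  = memfd,
 *		.flags  = UDMABUF_FLAGS_CLOEXEC,
 *		.offset = 0,
 *		.size   = size,
 *	};
 *	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);
 */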
static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
{
	struct udmabuf_create create;
	struct udmabuf_create_list head;
	struct udmabuf_create_item list;

	if (copy_from_user(&create, (void __user *)arg,
			   sizeof(create)))
		return -EFAULT;

	head.flags  = create.flags;
	head.count  = 1;
	list.memfd  = create.memfd;
	list.offset = create.offset;
	list.size   = create.size;

	return udmabuf_create(filp->private_data, &head, &list);
}

static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
{
	struct udmabuf_create_list head;
	struct udmabuf_create_item *list;
	int ret = -EINVAL;
	u32 lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > list_limit)
		return -EINVAL;
	lsize = sizeof(struct udmabuf_create_item) * head.count;
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	ret = udmabuf_create(filp->private_data, &head, list);
	kfree(list);
	return ret;
}

static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	long ret;

	switch (ioctl) {
	case UDMABUF_CREATE:
		ret = udmabuf_ioctl_create(filp, arg);
		break;
	case UDMABUF_CREATE_LIST:
		ret = udmabuf_ioctl_create_list(filp, arg);
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}

static const struct file_operations udmabuf_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = udmabuf_ioctl,
#endif
};

static struct miscdevice udmabuf_misc = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "udmabuf",
	.fops           = &udmabuf_fops,
};

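/*
 * The misc device's struct device doubles as the DMA device for the CPU
 * access syncs above, so init has to give it a usable DMA mask.
 */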
static int __init udmabuf_dev_init(void)
{
	int ret;

	ret = misc_register(&udmabuf_misc);
	if (ret < 0) {
		pr_err("Could not initialize udmabuf device\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
					   DMA_BIT_MASK(64));
	if (ret < 0) {
		pr_err("Could not setup DMA mask for udmabuf device\n");
		misc_deregister(&udmabuf_misc);
		return ret;
	}

	return 0;
}

static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}

module_init(udmabuf_dev_init)
module_exit(udmabuf_dev_exit)

MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_LICENSE("GPL v2");