xref: /OK3568_Linux_fs/kernel/drivers/media/v4l2-core/videobuf-vmalloc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * helper functions for vmalloc video4linux capture buffers
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * The functions expect the hardware being able to scatter gather
6*4882a593Smuzhiyun  * (i.e. the buffers are not linear in physical memory, but fragmented
7*4882a593Smuzhiyun  * into PAGE_SIZE chunks).  They also assume the driver does not need
8*4882a593Smuzhiyun  * to touch the video data.
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * (c) 2007 Mauro Carvalho Chehab <mchehab@kernel.org>
11*4882a593Smuzhiyun  */
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/init.h>
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include <linux/moduleparam.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/interrupt.h>
18*4882a593Smuzhiyun #include <linux/pgtable.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include <linux/pci.h>
21*4882a593Smuzhiyun #include <linux/vmalloc.h>
22*4882a593Smuzhiyun #include <linux/pagemap.h>
23*4882a593Smuzhiyun #include <asm/page.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include <media/videobuf-vmalloc.h>
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #define MAGIC_DMABUF   0x17760309
28*4882a593Smuzhiyun #define MAGIC_VMAL_MEM 0x18221223
29*4882a593Smuzhiyun 
/*
 * Verify the magic cookie of a videobuf memory object and BUG() on a
 * mismatch.  Wrapped in do { } while (0) so the macro expands to a
 * single statement and is safe inside unbraced if/else bodies (the
 * original bare-if form could bind a following "else" to the wrong if).
 */
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \
					is, should);			\
			BUG();						\
		}							\
	} while (0)
36*4882a593Smuzhiyun 
/* Debug verbosity: 0 = quiet; >= 1 and >= 2 enable the dprintk()
 * messages at the corresponding levels.  Writable at runtime via
 * /sys/module/<module>/parameters/debug (mode 0644). */
static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");
43*4882a593Smuzhiyun 
/*
 * Level-gated debug printk, prefixed with "vbuf-vmalloc: ".  Wrapped
 * in do { } while (0) so the macro is a single statement; the original
 * bare-if form was a dangling-else hazard when used in unbraced
 * if/else bodies.
 */
#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg); \
	} while (0)
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun /***************************************************************************/
50*4882a593Smuzhiyun 
/*
 * VMA open handler: called by the VM when a mapping of one of our
 * buffers is duplicated (e.g. on fork()), and explicitly from
 * __videobuf_mmap_mapper() for the initial mmap().  Takes one
 * reference on the mapping; videobuf_vm_close() drops it.
 */
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
		map->count, vma->vm_start, vma->vm_end);

	map->count++;
}
60*4882a593Smuzhiyun 
videobuf_vm_close(struct vm_area_struct * vma)61*4882a593Smuzhiyun static void videobuf_vm_close(struct vm_area_struct *vma)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun 	struct videobuf_mapping *map = vma->vm_private_data;
64*4882a593Smuzhiyun 	struct videobuf_queue *q = map->q;
65*4882a593Smuzhiyun 	int i;
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 	dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
68*4882a593Smuzhiyun 		map->count, vma->vm_start, vma->vm_end);
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	map->count--;
71*4882a593Smuzhiyun 	if (0 == map->count) {
72*4882a593Smuzhiyun 		struct videobuf_vmalloc_memory *mem;
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 		dprintk(1, "munmap %p q=%p\n", map, q);
75*4882a593Smuzhiyun 		videobuf_queue_lock(q);
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 		/* We need first to cancel streams, before unmapping */
78*4882a593Smuzhiyun 		if (q->streaming)
79*4882a593Smuzhiyun 			videobuf_queue_cancel(q);
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
82*4882a593Smuzhiyun 			if (NULL == q->bufs[i])
83*4882a593Smuzhiyun 				continue;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 			if (q->bufs[i]->map != map)
86*4882a593Smuzhiyun 				continue;
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 			mem = q->bufs[i]->priv;
89*4882a593Smuzhiyun 			if (mem) {
90*4882a593Smuzhiyun 				/* This callback is called only if kernel has
91*4882a593Smuzhiyun 				   allocated memory and this memory is mmapped.
92*4882a593Smuzhiyun 				   In this case, memory should be freed,
93*4882a593Smuzhiyun 				   in order to do memory unmap.
94*4882a593Smuzhiyun 				 */
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 				MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 				/* vfree is not atomic - can't be
99*4882a593Smuzhiyun 				   called with IRQ's disabled
100*4882a593Smuzhiyun 				 */
101*4882a593Smuzhiyun 				dprintk(1, "%s: buf[%d] freeing (%p)\n",
102*4882a593Smuzhiyun 					__func__, i, mem->vaddr);
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun 				vfree(mem->vaddr);
105*4882a593Smuzhiyun 				mem->vaddr = NULL;
106*4882a593Smuzhiyun 			}
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 			q->bufs[i]->map   = NULL;
109*4882a593Smuzhiyun 			q->bufs[i]->baddr = 0;
110*4882a593Smuzhiyun 		}
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 		kfree(map);
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 		videobuf_queue_unlock(q);
115*4882a593Smuzhiyun 	}
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	return;
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun 
/* VMA lifetime hooks installed by __videobuf_mmap_mapper(): the open
 * hook refcounts the mapping, the close hook tears it down on the
 * final unmap. */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open     = videobuf_vm_open,
	.close    = videobuf_vm_close,
};
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun /* ---------------------------------------------------------------------
126*4882a593Smuzhiyun  * vmalloc handlers for the generic methods
127*4882a593Smuzhiyun  */
128*4882a593Smuzhiyun 
/* Allocated area consists of 3 parts:
	struct video_buffer
	struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
	struct videobuf_vmalloc_memory
 */
134*4882a593Smuzhiyun 
/*
 * Allocate a videobuf buffer of @size bytes (the driver's buffer
 * struct, which embeds struct videobuf_buffer) together with the
 * backend's private struct videobuf_vmalloc_memory in one zeroed
 * allocation; vb->priv points at the trailing private part.
 * Returns NULL on allocation failure.
 */
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
	struct videobuf_buffer *vb;
	struct videobuf_vmalloc_memory *mem;

	/* One allocation carries both the buffer and our private data */
	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (!vb)
		return NULL;

	mem = (struct videobuf_vmalloc_memory *)((char *)vb + size);
	vb->priv = mem;
	mem->magic = MAGIC_VMAL_MEM;

	dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
		__func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
		mem, (long)sizeof(*mem));

	return vb;
}
153*4882a593Smuzhiyun 
__videobuf_iolock(struct videobuf_queue * q,struct videobuf_buffer * vb,struct v4l2_framebuffer * fbuf)154*4882a593Smuzhiyun static int __videobuf_iolock(struct videobuf_queue *q,
155*4882a593Smuzhiyun 			     struct videobuf_buffer *vb,
156*4882a593Smuzhiyun 			     struct v4l2_framebuffer *fbuf)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun 	struct videobuf_vmalloc_memory *mem = vb->priv;
159*4882a593Smuzhiyun 	int pages;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	BUG_ON(!mem);
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	switch (vb->memory) {
166*4882a593Smuzhiyun 	case V4L2_MEMORY_MMAP:
167*4882a593Smuzhiyun 		dprintk(1, "%s memory method MMAP\n", __func__);
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 		/* All handling should be done by __videobuf_mmap_mapper() */
170*4882a593Smuzhiyun 		if (!mem->vaddr) {
171*4882a593Smuzhiyun 			printk(KERN_ERR "memory is not allocated/mmapped.\n");
172*4882a593Smuzhiyun 			return -EINVAL;
173*4882a593Smuzhiyun 		}
174*4882a593Smuzhiyun 		break;
175*4882a593Smuzhiyun 	case V4L2_MEMORY_USERPTR:
176*4882a593Smuzhiyun 		pages = PAGE_ALIGN(vb->size);
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 		dprintk(1, "%s memory method USERPTR\n", __func__);
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 		if (vb->baddr) {
181*4882a593Smuzhiyun 			printk(KERN_ERR "USERPTR is currently not supported\n");
182*4882a593Smuzhiyun 			return -EINVAL;
183*4882a593Smuzhiyun 		}
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 		/* The only USERPTR currently supported is the one needed for
186*4882a593Smuzhiyun 		 * read() method.
187*4882a593Smuzhiyun 		 */
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 		mem->vaddr = vmalloc_user(pages);
190*4882a593Smuzhiyun 		if (!mem->vaddr) {
191*4882a593Smuzhiyun 			printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
192*4882a593Smuzhiyun 			return -ENOMEM;
193*4882a593Smuzhiyun 		}
194*4882a593Smuzhiyun 		dprintk(1, "vmalloc is at addr %p (%d pages)\n",
195*4882a593Smuzhiyun 			mem->vaddr, pages);
196*4882a593Smuzhiyun 		break;
197*4882a593Smuzhiyun 	case V4L2_MEMORY_OVERLAY:
198*4882a593Smuzhiyun 	default:
199*4882a593Smuzhiyun 		dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 		/* Currently, doesn't support V4L2_MEMORY_OVERLAY */
202*4882a593Smuzhiyun 		printk(KERN_ERR "Memory method currently unsupported.\n");
203*4882a593Smuzhiyun 		return -EINVAL;
204*4882a593Smuzhiyun 	}
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	return 0;
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun 
__videobuf_mmap_mapper(struct videobuf_queue * q,struct videobuf_buffer * buf,struct vm_area_struct * vma)209*4882a593Smuzhiyun static int __videobuf_mmap_mapper(struct videobuf_queue *q,
210*4882a593Smuzhiyun 				  struct videobuf_buffer *buf,
211*4882a593Smuzhiyun 				  struct vm_area_struct *vma)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	struct videobuf_vmalloc_memory *mem;
214*4882a593Smuzhiyun 	struct videobuf_mapping *map;
215*4882a593Smuzhiyun 	int retval, pages;
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	dprintk(1, "%s\n", __func__);
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun 	/* create mapping + update buffer list */
220*4882a593Smuzhiyun 	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
221*4882a593Smuzhiyun 	if (NULL == map)
222*4882a593Smuzhiyun 		return -ENOMEM;
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	buf->map = map;
225*4882a593Smuzhiyun 	map->q     = q;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	buf->baddr = vma->vm_start;
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	mem = buf->priv;
230*4882a593Smuzhiyun 	BUG_ON(!mem);
231*4882a593Smuzhiyun 	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
234*4882a593Smuzhiyun 	mem->vaddr = vmalloc_user(pages);
235*4882a593Smuzhiyun 	if (!mem->vaddr) {
236*4882a593Smuzhiyun 		printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
237*4882a593Smuzhiyun 		goto error;
238*4882a593Smuzhiyun 	}
239*4882a593Smuzhiyun 	dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	/* Try to remap memory */
242*4882a593Smuzhiyun 	retval = remap_vmalloc_range(vma, mem->vaddr, 0);
243*4882a593Smuzhiyun 	if (retval < 0) {
244*4882a593Smuzhiyun 		printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
245*4882a593Smuzhiyun 		vfree(mem->vaddr);
246*4882a593Smuzhiyun 		goto error;
247*4882a593Smuzhiyun 	}
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	vma->vm_ops          = &videobuf_vm_ops;
250*4882a593Smuzhiyun 	vma->vm_flags       |= VM_DONTEXPAND | VM_DONTDUMP;
251*4882a593Smuzhiyun 	vma->vm_private_data = map;
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
254*4882a593Smuzhiyun 		map, q, vma->vm_start, vma->vm_end,
255*4882a593Smuzhiyun 		(long int)buf->bsize,
256*4882a593Smuzhiyun 		vma->vm_pgoff, buf->i);
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	videobuf_vm_open(vma);
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun 	return 0;
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun error:
263*4882a593Smuzhiyun 	mem = NULL;
264*4882a593Smuzhiyun 	kfree(map);
265*4882a593Smuzhiyun 	return -ENOMEM;
266*4882a593Smuzhiyun }
267*4882a593Smuzhiyun 
/* Backend operations handed to videobuf-core for queues set up with
 * videobuf_queue_vmalloc_init(); the core dispatches through these
 * for allocation, locking and mmap handling. */
static struct videobuf_qtype_ops qops = {
	.magic        = MAGIC_QTYPE_OPS,

	.alloc_vb     = __videobuf_alloc_vb,
	.iolock       = __videobuf_iolock,
	.mmap_mapper  = __videobuf_mmap_mapper,
	.vaddr        = videobuf_to_vmalloc,
};
276*4882a593Smuzhiyun 
/*
 * Initialize @q as a vmalloc-backed videobuf queue.  Thin wrapper
 * around videobuf_queue_core_init() that plugs in this backend's
 * handler table (qops); all other arguments are forwarded unchanged.
 */
void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
			 const struct videobuf_queue_ops *ops,
			 struct device *dev,
			 spinlock_t *irqlock,
			 enum v4l2_buf_type type,
			 enum v4l2_field field,
			 unsigned int msize,
			 void *priv,
			 struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
291*4882a593Smuzhiyun 
/*
 * Return the kernel virtual address of @buf's vmalloc'ed data area,
 * or NULL if no memory has been allocated yet.  BUGs if @buf does not
 * carry this backend's private data (magic check).
 */
void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
	struct videobuf_vmalloc_memory *mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

	return mem->vaddr;
}
EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
301*4882a593Smuzhiyun 
videobuf_vmalloc_free(struct videobuf_buffer * buf)302*4882a593Smuzhiyun void videobuf_vmalloc_free(struct videobuf_buffer *buf)
303*4882a593Smuzhiyun {
304*4882a593Smuzhiyun 	struct videobuf_vmalloc_memory *mem = buf->priv;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	/* mmapped memory can't be freed here, otherwise mmapped region
307*4882a593Smuzhiyun 	   would be released, while still needed. In this case, the memory
308*4882a593Smuzhiyun 	   release should happen inside videobuf_vm_close().
309*4882a593Smuzhiyun 	   So, it should free memory only if the memory were allocated for
310*4882a593Smuzhiyun 	   read() operation.
311*4882a593Smuzhiyun 	 */
312*4882a593Smuzhiyun 	if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
313*4882a593Smuzhiyun 		return;
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	if (!mem)
316*4882a593Smuzhiyun 		return;
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	vfree(mem->vaddr);
321*4882a593Smuzhiyun 	mem->vaddr = NULL;
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	return;
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
326*4882a593Smuzhiyun 
327