// SPDX-License-Identifier: GPL-2.0-only
/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
 */
13*4882a593Smuzhiyun
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
22*4882a593Smuzhiyun
/*
 * Per-buffer private state for a DMA-contiguous allocation, stored in
 * videobuf_buffer->priv (allocated together with the buffer itself in
 * __videobuf_alloc()).
 */
struct videobuf_dma_contig_memory {
	u32 magic;		/* must be MAGIC_DC_MEM; guards casts from void *priv */
	void *vaddr;		/* kernel virtual address, NULL while unallocated */
	dma_addr_t dma_handle;	/* bus/DMA address handed to the device */
	unsigned long size;	/* allocation size in bytes */
};

#define MAGIC_DC_MEM 0x0733ac61
/* priv pointers are untyped (void *) — crash loudly on type confusion */
#define MAGIC_CHECK(is, should)						    \
	if (unlikely((is) != (should))) {				    \
		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
		BUG();							    \
	}
36*4882a593Smuzhiyun
/*
 * Allocate a physically contiguous, DMA-coherent buffer of @size bytes
 * and record the mapping details in @mem.
 *
 * Returns 0 on success, -ENOMEM when the coherent allocation fails.
 */
static int __videobuf_dc_alloc(struct device *dev,
			       struct videobuf_dma_contig_memory *mem,
			       unsigned long size, gfp_t flags)
{
	mem->size = size;
	mem->vaddr = dma_alloc_coherent(dev, size, &mem->dma_handle, flags);
	if (!mem->vaddr) {
		dev_err(dev, "memory alloc size %ld failed\n", mem->size);
		return -ENOMEM;
	}

	dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);

	return 0;
}
54*4882a593Smuzhiyun
__videobuf_dc_free(struct device * dev,struct videobuf_dma_contig_memory * mem)55*4882a593Smuzhiyun static void __videobuf_dc_free(struct device *dev,
56*4882a593Smuzhiyun struct videobuf_dma_contig_memory *mem)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun mem->vaddr = NULL;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun
videobuf_vm_open(struct vm_area_struct * vma)63*4882a593Smuzhiyun static void videobuf_vm_open(struct vm_area_struct *vma)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun struct videobuf_mapping *map = vma->vm_private_data;
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
68*4882a593Smuzhiyun map, map->count, vma->vm_start, vma->vm_end);
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun map->count++;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun
videobuf_vm_close(struct vm_area_struct * vma)73*4882a593Smuzhiyun static void videobuf_vm_close(struct vm_area_struct *vma)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun struct videobuf_mapping *map = vma->vm_private_data;
76*4882a593Smuzhiyun struct videobuf_queue *q = map->q;
77*4882a593Smuzhiyun int i;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
80*4882a593Smuzhiyun map, map->count, vma->vm_start, vma->vm_end);
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun map->count--;
83*4882a593Smuzhiyun if (0 == map->count) {
84*4882a593Smuzhiyun struct videobuf_dma_contig_memory *mem;
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
87*4882a593Smuzhiyun videobuf_queue_lock(q);
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun /* We need first to cancel streams, before unmapping */
90*4882a593Smuzhiyun if (q->streaming)
91*4882a593Smuzhiyun videobuf_queue_cancel(q);
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun for (i = 0; i < VIDEO_MAX_FRAME; i++) {
94*4882a593Smuzhiyun if (NULL == q->bufs[i])
95*4882a593Smuzhiyun continue;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun if (q->bufs[i]->map != map)
98*4882a593Smuzhiyun continue;
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun mem = q->bufs[i]->priv;
101*4882a593Smuzhiyun if (mem) {
102*4882a593Smuzhiyun /* This callback is called only if kernel has
103*4882a593Smuzhiyun allocated memory and this memory is mmapped.
104*4882a593Smuzhiyun In this case, memory should be freed,
105*4882a593Smuzhiyun in order to do memory unmap.
106*4882a593Smuzhiyun */
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun /* vfree is not atomic - can't be
111*4882a593Smuzhiyun called with IRQ's disabled
112*4882a593Smuzhiyun */
113*4882a593Smuzhiyun dev_dbg(q->dev, "buf[%d] freeing %p\n",
114*4882a593Smuzhiyun i, mem->vaddr);
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun __videobuf_dc_free(q->dev, mem);
117*4882a593Smuzhiyun mem->vaddr = NULL;
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun q->bufs[i]->map = NULL;
121*4882a593Smuzhiyun q->bufs[i]->baddr = 0;
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun kfree(map);
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun videobuf_queue_unlock(q);
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
/* mmap lifetime hooks; refcounting of the mapping happens in open/close */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
134*4882a593Smuzhiyun
/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	/* USERPTR buffers hold no kernel allocation — just forget the bus
	 * address and size recorded by videobuf_dma_contig_user_get().
	 */
	mem->dma_handle = 0;
	mem->size = 0;
}
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun /**
148*4882a593Smuzhiyun * videobuf_dma_contig_user_get() - setup user space memory pointer
149*4882a593Smuzhiyun * @mem: per-buffer private videobuf-dma-contig data
150*4882a593Smuzhiyun * @vb: video buffer to map
151*4882a593Smuzhiyun *
152*4882a593Smuzhiyun * This function validates and sets up a pointer to user space memory.
153*4882a593Smuzhiyun * Only physically contiguous pfn-mapped memory is accepted.
154*4882a593Smuzhiyun *
155*4882a593Smuzhiyun * Returns 0 if successful.
156*4882a593Smuzhiyun */
videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory * mem,struct videobuf_buffer * vb)157*4882a593Smuzhiyun static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
158*4882a593Smuzhiyun struct videobuf_buffer *vb)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun unsigned long untagged_baddr = untagged_addr(vb->baddr);
161*4882a593Smuzhiyun struct mm_struct *mm = current->mm;
162*4882a593Smuzhiyun struct vm_area_struct *vma;
163*4882a593Smuzhiyun unsigned long prev_pfn, this_pfn;
164*4882a593Smuzhiyun unsigned long pages_done, user_address;
165*4882a593Smuzhiyun unsigned int offset;
166*4882a593Smuzhiyun int ret;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun offset = untagged_baddr & ~PAGE_MASK;
169*4882a593Smuzhiyun mem->size = PAGE_ALIGN(vb->size + offset);
170*4882a593Smuzhiyun ret = -EINVAL;
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun mmap_read_lock(mm);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun vma = find_vma(mm, untagged_baddr);
175*4882a593Smuzhiyun if (!vma)
176*4882a593Smuzhiyun goto out_up;
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun if ((untagged_baddr + mem->size) > vma->vm_end)
179*4882a593Smuzhiyun goto out_up;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun pages_done = 0;
182*4882a593Smuzhiyun prev_pfn = 0; /* kill warning */
183*4882a593Smuzhiyun user_address = untagged_baddr;
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun while (pages_done < (mem->size >> PAGE_SHIFT)) {
186*4882a593Smuzhiyun ret = follow_pfn(vma, user_address, &this_pfn);
187*4882a593Smuzhiyun if (ret)
188*4882a593Smuzhiyun break;
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun if (pages_done == 0)
191*4882a593Smuzhiyun mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
192*4882a593Smuzhiyun else if (this_pfn != (prev_pfn + 1))
193*4882a593Smuzhiyun ret = -EFAULT;
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun if (ret)
196*4882a593Smuzhiyun break;
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun prev_pfn = this_pfn;
199*4882a593Smuzhiyun user_address += PAGE_SIZE;
200*4882a593Smuzhiyun pages_done++;
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun out_up:
204*4882a593Smuzhiyun mmap_read_unlock(current->mm);
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun return ret;
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun
/*
 * Allocate a videobuf_buffer with the dma-contig private area placed
 * directly behind it, in one allocation.  @size is the caller's buffer
 * struct size; priv points just past it.
 */
static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (!vb)
		return NULL;

	vb->priv = ((char *)vb) + size;
	mem = vb->priv;
	mem->magic = MAGIC_DC_MEM;

	return vb;
}
223*4882a593Smuzhiyun
__videobuf_to_vaddr(struct videobuf_buffer * buf)224*4882a593Smuzhiyun static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
225*4882a593Smuzhiyun {
226*4882a593Smuzhiyun struct videobuf_dma_contig_memory *mem = buf->priv;
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun BUG_ON(!mem);
229*4882a593Smuzhiyun MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun return mem->vaddr;
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun
/*
 * Prepare the buffer's backing memory according to its memory type:
 * MMAP buffers must already have been allocated by the mmap mapper,
 * USERPTR buffers are either pinned user memory or (for read()) a
 * fresh kernel allocation; OVERLAY is not supported.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not allocated/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
					GFP_KERNEL))
			return -ENOMEM;
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
		return -EINVAL;
	}

	return 0;
}
273*4882a593Smuzhiyun
__videobuf_mmap_mapper(struct videobuf_queue * q,struct videobuf_buffer * buf,struct vm_area_struct * vma)274*4882a593Smuzhiyun static int __videobuf_mmap_mapper(struct videobuf_queue *q,
275*4882a593Smuzhiyun struct videobuf_buffer *buf,
276*4882a593Smuzhiyun struct vm_area_struct *vma)
277*4882a593Smuzhiyun {
278*4882a593Smuzhiyun struct videobuf_dma_contig_memory *mem;
279*4882a593Smuzhiyun struct videobuf_mapping *map;
280*4882a593Smuzhiyun int retval;
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun dev_dbg(q->dev, "%s\n", __func__);
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun /* create mapping + update buffer list */
285*4882a593Smuzhiyun map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
286*4882a593Smuzhiyun if (!map)
287*4882a593Smuzhiyun return -ENOMEM;
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun buf->map = map;
290*4882a593Smuzhiyun map->q = q;
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun buf->baddr = vma->vm_start;
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun mem = buf->priv;
295*4882a593Smuzhiyun BUG_ON(!mem);
296*4882a593Smuzhiyun MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
299*4882a593Smuzhiyun GFP_KERNEL | __GFP_COMP))
300*4882a593Smuzhiyun goto error;
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun /* Try to remap memory */
303*4882a593Smuzhiyun vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun /* the "vm_pgoff" is just used in v4l2 to find the
306*4882a593Smuzhiyun * corresponding buffer data structure which is allocated
307*4882a593Smuzhiyun * earlier and it does not mean the offset from the physical
308*4882a593Smuzhiyun * buffer start address as usual. So set it to 0 to pass
309*4882a593Smuzhiyun * the sanity check in vm_iomap_memory().
310*4882a593Smuzhiyun */
311*4882a593Smuzhiyun vma->vm_pgoff = 0;
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
314*4882a593Smuzhiyun if (retval) {
315*4882a593Smuzhiyun dev_err(q->dev, "mmap: remap failed with error %d. ",
316*4882a593Smuzhiyun retval);
317*4882a593Smuzhiyun dma_free_coherent(q->dev, mem->size,
318*4882a593Smuzhiyun mem->vaddr, mem->dma_handle);
319*4882a593Smuzhiyun goto error;
320*4882a593Smuzhiyun }
321*4882a593Smuzhiyun
322*4882a593Smuzhiyun vma->vm_ops = &videobuf_vm_ops;
323*4882a593Smuzhiyun vma->vm_flags |= VM_DONTEXPAND;
324*4882a593Smuzhiyun vma->vm_private_data = map;
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
327*4882a593Smuzhiyun map, q, vma->vm_start, vma->vm_end,
328*4882a593Smuzhiyun (long int)buf->bsize, vma->vm_pgoff, buf->i);
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun videobuf_vm_open(vma);
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun return 0;
333*4882a593Smuzhiyun
334*4882a593Smuzhiyun error:
335*4882a593Smuzhiyun kfree(map);
336*4882a593Smuzhiyun return -ENOMEM;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun
/* videobuf core callbacks implementing the dma-contig memory backend */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,
	.alloc_vb = __videobuf_alloc,
	.iolock = __videobuf_iolock,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = __videobuf_to_vaddr,
};
346*4882a593Smuzhiyun
/*
 * Initialize @q as a videobuf queue backed by the dma-contig memory
 * operations (qops above); all other arguments are forwarded verbatim
 * to videobuf_queue_core_init().
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
361*4882a593Smuzhiyun
/* Return the bus/DMA address of @buf's backing memory for the device. */
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
372*4882a593Smuzhiyun
videobuf_dma_contig_free(struct videobuf_queue * q,struct videobuf_buffer * buf)373*4882a593Smuzhiyun void videobuf_dma_contig_free(struct videobuf_queue *q,
374*4882a593Smuzhiyun struct videobuf_buffer *buf)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun struct videobuf_dma_contig_memory *mem = buf->priv;
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun /* mmapped memory can't be freed here, otherwise mmapped region
379*4882a593Smuzhiyun would be released, while still needed. In this case, the memory
380*4882a593Smuzhiyun release should happen inside videobuf_vm_close().
381*4882a593Smuzhiyun So, it should free memory only if the memory were allocated for
382*4882a593Smuzhiyun read() operation.
383*4882a593Smuzhiyun */
384*4882a593Smuzhiyun if (buf->memory != V4L2_MEMORY_USERPTR)
385*4882a593Smuzhiyun return;
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun if (!mem)
388*4882a593Smuzhiyun return;
389*4882a593Smuzhiyun
390*4882a593Smuzhiyun MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun /* handle user space pointer case */
393*4882a593Smuzhiyun if (buf->baddr) {
394*4882a593Smuzhiyun videobuf_dma_contig_user_put(mem);
395*4882a593Smuzhiyun return;
396*4882a593Smuzhiyun }
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun /* read() method */
399*4882a593Smuzhiyun if (mem->vaddr) {
400*4882a593Smuzhiyun __videobuf_dc_free(q->dev, mem);
401*4882a593Smuzhiyun mem->vaddr = NULL;
402*4882a593Smuzhiyun }
403*4882a593Smuzhiyun }
404*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
405*4882a593Smuzhiyun
/* module metadata */
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
409